/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
static int __btrfs_start_workers(struct btrfs_workers *workers);
/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time. It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations. This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};
static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;

	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}
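
/*
 * Illustrative sketch (not from the original file): how a caller might
 * wire up the one-thread helper queue described above. The pool names
 * are hypothetical; in the btrfs tree the disk-io setup code performs
 * the equivalent steps. With this wiring, find_worker() never calls
 * kthread_run() from a worker context; it just sets
 * atomic_start_pending and lets the helper thread do the blocking part.
 */
static void __maybe_unused example_wire_helper_queue(void)
{
	static struct btrfs_workers helper;
	static struct btrfs_workers submitters;

	/* the helper pool itself has no helper and exactly one thread */
	btrfs_init_workers(&helper, "helper", 1, NULL);

	/* every other pool points at it via the async_helper argument */
	btrfs_init_workers(&submitters, "submit", 16, &helper);
}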
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	/* nothing asked for a new worker? we're done */
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}
static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		spin_unlock(&workers->order_lock);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		spin_lock(&workers->order_lock);
	}

	spin_unlock(&workers->order_lock);
}
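
/*
 * Illustrative sketch (not from the original file): the shape of an
 * ordered work item for a pool created with workers->ordered set.
 * func may run in parallel across threads, while ordered_func and
 * ordered_free run strictly in queue order, enforced by the barrier
 * logic above. All example_* names are hypothetical.
 */
struct example_ordered_item {
	struct btrfs_work work;
	/* per-item payload would live here */
};

static void __maybe_unused example_func(struct btrfs_work *work)
{
	/* heavy, parallelizable part: checksumming, compression, etc. */
}

static void __maybe_unused example_ordered_func(struct btrfs_work *work)
{
	/* in-order part, e.g. submitting results in submission order */
}

static void __maybe_unused example_ordered_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct example_ordered_item, work));
}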
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head)) {
		cur = prio_head->next;
		goto out;
	}

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head)) {
		cur = head->next;
		goto out;
	}

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * queue something?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
/*
 * this will wait for all the worker threads to shutdown
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	workers->stopping = 1;
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
	workers->stopping = 0;
}
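
/*
 * Illustrative sketch (not from the original file): a minimal lifecycle
 * for a pool using the API above, reusing the hypothetical example_func
 * from the earlier sketch. Error handling and shutdown synchronization
 * are elided.
 */
static void __maybe_unused example_pool_lifecycle(struct btrfs_work *work)
{
	static struct btrfs_workers pool;

	btrfs_init_workers(&pool, "example", 4, NULL);
	if (btrfs_start_workers(&pool))	/* spawns the first thread */
		return;

	work->func = example_func;
	btrfs_queue_worker(&pool, work);

	/* ... once all queued work is known to have finished ... */
	btrfs_stop_workers(&pool);
}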
/*
 * starts new worker threads. This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_create(worker_loop, worker,
				      "btrfs-%s-%d", workers->name,
				      workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		goto fail;
	}

	spin_lock_irq(&workers->lock);
	if (workers->stopping) {
		spin_unlock_irq(&workers->lock);
		ret = -EINVAL;
		goto fail_kthread;
	}
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	wake_up_process(worker->task);
	return 0;

fail_kthread:
	kthread_stop(worker->task);
fail:
	kfree(worker);
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}
int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now. This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list. This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * idle.
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number. This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
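
/*
 * Worked example (not from the original file): with the default
 * idle_thresh of 32, a busy worker at the head of worker_list absorbs
 * roughly 32 consecutive submissions before sequence % 32 == 0 rotates
 * it to the tail and the next busy worker starts its own batch. Work
 * arriving close together in time therefore tends to land on the same
 * thread, which helps locality.
 */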
/*
 * selects a worker thread to take the next job. This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from. It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
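
/*
 * Illustrative sketch (not from the original file): a long-running work
 * function that processes one bounded chunk per invocation and requeues
 * itself until done, as the comment above describes. Both example_*
 * helpers are hypothetical stand-ins.
 */
static int __maybe_unused example_has_more_chunks(struct btrfs_work *work)
{
	/* real code would track remaining progress in the work item */
	return 0;
}

static void __maybe_unused example_chunked_func(struct btrfs_work *work)
{
	/* process one chunk here, then either yield or finish */
	if (example_has_more_chunks(work))
		btrfs_requeue_work(work);	/* give the cpu up, resume later */
	else
		kfree(work);
}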
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
struct __btrfs_workqueue_struct {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;
	int max_active;
	int current_max;
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};
struct btrfs_workqueue_struct {
	struct __btrfs_workqueue_struct *normal;
	struct __btrfs_workqueue_struct *high;
};
static inline struct __btrfs_workqueue_struct
*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
{
	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	return ret;
}
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
						     int flags,
						     int max_active,
						     int thresh)
{
	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}
/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * is safe to call here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it to be that accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
}
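
/*
 * Worked example (not from the original file): with thresh = 32 and
 * max_active = 8, suppose current_max is 1 and 40 items are pending.
 * pending (40) > thresh (32), so new_max_active becomes 2 and
 * concurrency widens; if the backlog later drops below thresh / 2 (16),
 * it is stepped back down, never leaving the [1, max_active] range
 * enforced by clamp_val().
 */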
static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work_struct *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work_struct,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
	}
	spin_unlock_irqrestore(lock, flags);
}
static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work_struct *work;
	struct __btrfs_workqueue_struct *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work_struct, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
}
void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *))
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, normal_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
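
/*
 * Illustrative sketch (not from the original file): a minimal lifecycle
 * for the kernel-workqueue-backed API above. The queue name, flags and
 * callback are hypothetical; error handling and completion waiting are
 * elided. Passing thresh == 0 picks DFT_THRESHOLD, and NULL ordered
 * callbacks mean func() alone completes the item.
 */
static void __maybe_unused example_new_func(struct btrfs_work_struct *work)
{
	/* parallel part of the job goes here */
}

static void __maybe_unused example_new_api_lifecycle(struct btrfs_work_struct *work)
{
	struct btrfs_workqueue_struct *wq;

	wq = btrfs_alloc_workqueue("example", WQ_UNBOUND, 4, 0);
	if (!wq)
		return;

	btrfs_init_work(work, example_new_func, NULL, NULL);
	btrfs_queue_work(wq, work);

	/* ... after all queued work has finished ... */
	btrfs_destroy_workqueue(wq);
}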
static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
				      struct btrfs_work_struct *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
}
void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work)
{
	struct __btrfs_workqueue_struct *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}
void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
{
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}
void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}