/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;	/* work items queued but not yet executed */
	int max_active;		/* upper limit set by the user */
	int current_max;	/* max_active currently applied to normal_wq */
	int thresh;		/* pending threshold, NO_THRESHOLD disables it */
	unsigned int count;	/* limits how often current_max is recomputed */
	spinlock_t thres_lock;	/* protects count and current_max */
};

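/*
 * The exported queue type: a normal-priority queue paired with an
 * optional high-priority one, which only exists when the queue was
 * allocated with WQ_HIGHPRI.
 */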
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

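/*
 * BTRFS_WORK_HELPER generates the thin wrapper handed to the kernel
 * workqueue: it maps the embedded work_struct back to its btrfs_work
 * and calls normal_work_helper().  Generating one wrapper per queue
 * type gives each work type a distinct function pointer.
 */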
#define BTRFS_WORK_HELPER(name)					\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);

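/*
 * Allocate one underlying workqueue.  @thresh tunes the automatic
 * scaling of max_active: 0 selects DFT_THRESHOLD, and any value below
 * DFT_THRESHOLD disables scaling so @max_active is applied directly.
 */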
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
			int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (!ret)
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/* Start small, thresh_exec_hook() will grow current_max */
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

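/*
 * Allocate the exported queue pair.  The normal queue is always
 * created (with WQ_HIGHPRI masked off); the high-priority queue is
 * created only when the caller passed WQ_HIGHPRI.
 */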
struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

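/*
 * Typical call sequence, as an illustrative sketch only (w, work_fn,
 * ordered_fn and ordered_free_fn are hypothetical caller-side names;
 * pass NULL for the ordered callbacks if ordering is not needed):
 *
 *	wq = btrfs_alloc_workqueue("worker", WQ_FREEZABLE | WQ_HIGHPRI,
 *				   max_active, 0);
 *	btrfs_init_work(&w->work, btrfs_worker_helper, work_fn,
 *			ordered_fn, ordered_free_fn);
 *	btrfs_queue_work(wq, &w->work);
 */
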
/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * is safe to call here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it so accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
	}
}

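/*
 * Run ordered completions in submission order.  A finished item that
 * is not yet at the head of the list stays queued and acts as a
 * barrier for the finished items behind it.
 */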
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

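/*
 * Common body of every BTRFS_WORK_HELPER wrapper: do the threshold
 * bookkeeping, run the work function, then drive whatever ordered
 * completions have become runnable.
 */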
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

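/*
 * Initialize a btrfs_work.  @uniq_func must be the BTRFS_WORK_HELPER
 * wrapper matching the destination queue; @func is the real work
 * function, and @ordered_func/@ordered_free, when provided, run in
 * submission order once it completes.
 */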
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

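/*
 * Submit to one underlying queue.  Ordered work is linked onto the
 * queue's ordered_list before being handed to the kernel workqueue;
 * that list is what preserves completion ordering.
 */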
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
	trace_btrfs_work_queued(work);
}

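/*
 * Pick the destination queue: work flagged via
 * btrfs_set_work_high_priority() goes to the high-priority queue when
 * one exists, everything else to the normal queue.
 */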
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

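/*
 * Tear down both underlying queues.  A NULL @wq is tolerated so error
 * paths can call this unconditionally.
 */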
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

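/*
 * Update the user-set ceiling only; the value actually applied to the
 * kernel workqueue is still chosen by thresh_exec_hook(), which clamps
 * against max_active.  No workqueue_set_max_active() call happens here.
 */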
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	if (!wq)
		return;
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}