/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
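
/*
 * Work item state, kept in btrfs_work->flags: WORK_QUEUED_BIT is set
 * when an item is placed on a pending list and cleared when a worker
 * dequeues it, so one item is never queued twice.  WORK_DONE_BIT
 * records that ->func has run, and WORK_ORDER_DONE_BIT that
 * ->ordered_func has been called, letting the ordered completion code
 * make exactly one pass over each finished item.
 */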
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread running worker_loop() */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* used to batch list rotations in next_worker() */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * run the completion hooks for every finished item at the head of the
 * order list, preserving submission order.
 */
static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (!list_empty(&workers->order_list)) {
		work = list_entry(workers->order_list.next,
				  struct btrfs_work, order_list);

		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}
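
/*
 * Example (hypothetical caller, not part of this file): with an
 * ordered pool, ->func may run on any worker in any order, while
 * ->ordered_func and ->ordered_free are called strictly in submission
 * order; note that ->ordered_free runs with workers->lock held.  The
 * pool and function names here are illustrative only.
 *
 *	btrfs_init_workers(&ord_pool, "ord-example", 4);
 *	ord_pool.ordered = 1;
 *	btrfs_start_workers(&ord_pool, 1);
 *
 *	work->func = compute_csum;        // unordered, runs in parallel
 *	work->ordered_func = submit_csum; // strictly in queue order
 *	work->ordered_free = free_csum;   // in order, workers->lock held
 *	btrfs_queue_worker(&ord_pool, work);
 */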
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;

	do {
		spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->pending)) {
			cur = worker->pending.next;
			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		worker->working = 0;
		if (freezing(current)) {
			/* drop the lock before sleeping in the freezer */
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;

	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);
		kthread_stop(worker->task);
		list_del(&worker->worker_list);
		kfree(worker);
	}
	return 0;
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
}
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		worker->workers = workers;
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}

		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}
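
/*
 * Example (hypothetical, not part of this file): a typical pool
 * lifecycle as a caller like disk-io.c would drive it; the pool name
 * and the counts are illustrative only.
 *
 *	struct btrfs_workers pool;
 *
 *	btrfs_init_workers(&pool, "example", 8); // at most 8 threads
 *	pool.idle_thresh = 4;                    // optional tuning
 *	ret = btrfs_start_workers(&pool, 1);     // spawn the first thread
 *	...
 *	btrfs_stop_workers(&pool);               // join and free them all
 */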
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * idle
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
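
/*
 * Worked example of the batching above: with the default idle_thresh
 * of 32, the worker at the head of the busy list takes up to 32
 * consecutive submissions that go through this busy path (sequence
 * 1..32); on the 32nd, sequence % 32 == 0 and the worker is rotated to
 * the tail, so the next submission lands on a different thread.
 */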
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			struct list_head *fallback = NULL;
			/*
			 * we have failed to find any workers, just
			 * fall back to the first one we can find
			 */
			if (!list_empty(&workers->worker_list))
				fallback = workers->worker_list.next;
			if (!list_empty(&workers->idle_list))
				fallback = workers->idle_list.next;
			BUG_ON(!fallback);
			worker = list_entry(fallback,
					    struct btrfs_worker_thread,
					    worker_list);
			spin_unlock_irqrestore(&workers->lock, flags);
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;
}
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	list_add_tail(&work->list, &worker->pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		/*
		 * irqs are already off and flags holds the outer lock's
		 * state, so take the pool lock without saving flags again
		 */
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}

	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		list_add_tail(&work->order_list, &workers->order_list);
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);
	list_add_tail(&work->list, &worker->pending);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	spin_unlock_irqrestore(&worker->lock, flags);

	if (wake)
		wake_up_process(worker->task);
out:
	return 0;
}
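
/*
 * Example (hypothetical caller, not part of this file): embedding a
 * btrfs_work in a private context and queueing it; my_ctx, my_work_func
 * and pool are illustrative names.
 *
 *	struct my_ctx {
 *		struct btrfs_work work;
 *		...
 *	};
 *
 *	static void my_work_func(struct btrfs_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *		// do the deferred processing, then free ctx
 *	}
 *
 *	ctx->work.func = my_work_func;
 *	ctx->work.flags = 0;
 *	btrfs_queue_worker(&pool, &ctx->work);
 */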