/* fs/btrfs/async-thread.c — btrfs worker thread pools */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
26 * container for the kthread task pointer and the list of pending work
27 * One of these is allocated per thread.
29 struct btrfs_worker_thread
{
30 /* list of struct btrfs_work that are waiting for service */
31 struct list_head pending
;
33 /* list of worker threads from struct btrfs_workers */
34 struct list_head worker_list
;
37 struct task_struct
*task
;
39 /* number of things on the pending list */
42 /* protects the pending list. */
45 /* set to non-zero when this thread is already awake and kicking */
50 * main loop for servicing work items
52 static int worker_loop(void *arg
)
54 struct btrfs_worker_thread
*worker
= arg
;
55 struct list_head
*cur
;
56 struct btrfs_work
*work
;
58 spin_lock_irq(&worker
->lock
);
59 while(!list_empty(&worker
->pending
)) {
60 cur
= worker
->pending
.next
;
61 work
= list_entry(cur
, struct btrfs_work
, list
);
62 list_del(&work
->list
);
63 clear_bit(0, &work
->flags
);
65 work
->worker
= worker
;
66 spin_unlock_irq(&worker
->lock
);
70 atomic_dec(&worker
->num_pending
);
71 spin_lock_irq(&worker
->lock
);
74 if (freezing(current
)) {
77 set_current_state(TASK_INTERRUPTIBLE
);
78 spin_unlock_irq(&worker
->lock
);
80 __set_current_state(TASK_RUNNING
);
82 } while (!kthread_should_stop());
87 * this will wait for all the worker threads to shutdown
89 int btrfs_stop_workers(struct btrfs_workers
*workers
)
91 struct list_head
*cur
;
92 struct btrfs_worker_thread
*worker
;
94 while(!list_empty(&workers
->worker_list
)) {
95 cur
= workers
->worker_list
.next
;
96 worker
= list_entry(cur
, struct btrfs_worker_thread
,
98 kthread_stop(worker
->task
);
99 list_del(&worker
->worker_list
);
106 * simple init on struct btrfs_workers
108 void btrfs_init_workers(struct btrfs_workers
*workers
, int max
)
110 workers
->num_workers
= 0;
111 INIT_LIST_HEAD(&workers
->worker_list
);
112 workers
->last
= NULL
;
113 spin_lock_init(&workers
->lock
);
114 workers
->max_workers
= max
;
118 * starts new worker threads. This does not enforce the max worker
119 * count in case you need to temporarily go past it.
121 int btrfs_start_workers(struct btrfs_workers
*workers
, int num_workers
)
123 struct btrfs_worker_thread
*worker
;
127 for (i
= 0; i
< num_workers
; i
++) {
128 worker
= kzalloc(sizeof(*worker
), GFP_NOFS
);
134 INIT_LIST_HEAD(&worker
->pending
);
135 INIT_LIST_HEAD(&worker
->worker_list
);
136 spin_lock_init(&worker
->lock
);
137 atomic_set(&worker
->num_pending
, 0);
138 worker
->task
= kthread_run(worker_loop
, worker
, "btrfs");
139 if (IS_ERR(worker
->task
)) {
140 ret
= PTR_ERR(worker
->task
);
144 spin_lock_irq(&workers
->lock
);
145 list_add_tail(&worker
->worker_list
, &workers
->worker_list
);
146 workers
->last
= worker
;
147 workers
->num_workers
++;
148 spin_unlock_irq(&workers
->lock
);
152 btrfs_stop_workers(workers
);
157 * run through the list and find a worker thread that doesn't have a lot
158 * to do right now. This can return null if we aren't yet at the thread
159 * count limit and all of the threads are busy.
161 static struct btrfs_worker_thread
*next_worker(struct btrfs_workers
*workers
)
163 struct btrfs_worker_thread
*worker
;
164 struct list_head
*next
;
165 struct list_head
*start
;
166 int enforce_min
= workers
->num_workers
< workers
->max_workers
;
168 /* start with the last thread if it isn't busy */
169 worker
= workers
->last
;
170 if (atomic_read(&worker
->num_pending
) < 64)
173 next
= worker
->worker_list
.next
;
174 start
= &worker
->worker_list
;
177 * check all the workers for someone that is bored. FIXME, do
178 * something smart here
180 while(next
!= start
) {
181 if (next
== &workers
->worker_list
) {
182 next
= workers
->worker_list
.next
;
185 worker
= list_entry(next
, struct btrfs_worker_thread
,
187 if (atomic_read(&worker
->num_pending
) < 64 || !enforce_min
)
192 * nobody was bored, if we're already at the max thread count,
193 * use the last thread
195 if (!enforce_min
|| atomic_read(&workers
->last
->num_pending
) < 64) {
196 return workers
->last
;
200 workers
->last
= worker
;
204 static struct btrfs_worker_thread
*find_worker(struct btrfs_workers
*workers
)
206 struct btrfs_worker_thread
*worker
;
210 spin_lock_irqsave(&workers
->lock
, flags
);
211 worker
= next_worker(workers
);
212 spin_unlock_irqrestore(&workers
->lock
, flags
);
215 spin_lock_irqsave(&workers
->lock
, flags
);
216 if (workers
->num_workers
>= workers
->max_workers
) {
218 * we have failed to find any workers, just
219 * return the force one
221 worker
= list_entry(workers
->worker_list
.next
,
222 struct btrfs_worker_thread
, worker_list
);
223 spin_unlock_irqrestore(&workers
->lock
, flags
);
225 spin_unlock_irqrestore(&workers
->lock
, flags
);
226 /* we're below the limit, start another worker */
227 btrfs_start_workers(workers
, 1);
235 * btrfs_requeue_work just puts the work item back on the tail of the list
236 * it was taken from. It is intended for use with long running work functions
237 * that make some progress and want to give the cpu up for others.
239 int btrfs_requeue_work(struct btrfs_work
*work
)
241 struct btrfs_worker_thread
*worker
= work
->worker
;
244 if (test_and_set_bit(0, &work
->flags
))
247 spin_lock_irqsave(&worker
->lock
, flags
);
248 atomic_inc(&worker
->num_pending
);
249 list_add_tail(&work
->list
, &worker
->pending
);
250 spin_unlock_irqrestore(&worker
->lock
, flags
);
256 * places a struct btrfs_work into the pending queue of one of the kthreads
258 int btrfs_queue_worker(struct btrfs_workers
*workers
, struct btrfs_work
*work
)
260 struct btrfs_worker_thread
*worker
;
264 /* don't requeue something already on a list */
265 if (test_and_set_bit(0, &work
->flags
))
268 worker
= find_worker(workers
);
270 spin_lock_irqsave(&worker
->lock
, flags
);
271 atomic_inc(&worker
->num_pending
);
272 list_add_tail(&work
->list
, &worker
->pending
);
275 * avoid calling into wake_up_process if this thread has already
278 if (!worker
->working
)
282 spin_unlock_irqrestore(&worker
->lock
, flags
);
285 wake_up_process(worker
->task
);