/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID	0xf00baa
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
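
/*
 * Each wait queue owns a single kernel timer, kept armed for the earliest
 * expiry among the tasks on queue->timer_list.list: adding a task with a
 * nearer deadline re-arms it, and __rpc_queue_timer_fn() below re-computes
 * the next expiry after it fires.
 */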

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}
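
/*
 * The insertion above builds a two-level structure: each priority level
 * holds one entry per task owner (linked via u.tk_wait.list), and further
 * tasks from the same owner hang off that entry's u.tk_wait.links sub-list,
 * so a single owner never occupies more than one slot in the round-robin
 * order.
 */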

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	BUG_ON(RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}
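
/*
 * If the departing task still has same-owner tasks parked on its
 * u.tk_wait.links sub-list, the first of those is promoted into the
 * departing task's slot and inherits the rest of the sub-list, preserving
 * the one-entry-per-owner invariant.
 */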

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}
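
/*
 * queue->count bounds how many tasks __rpc_wake_up_next_priority() may
 * serve from the current priority level before moving on: 1 << (priority * 2)
 * yields 1, 4, 16, ... for successively higher levels, so higher-priority
 * queues get geometrically more service without starving the lower ones.
 */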

static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_magic = RPC_TASK_MAGIC_ID;
	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
		return;
	rpc_task_set_debuginfo(task);
	/* Add to global list of all tasks */
	clnt = task->tk_client;
	if (clnt != NULL) {
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(rpciod_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
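
/*
 * An async task is handed to rpciod's workqueue and resumes via
 * rpc_async_schedule(); a synchronous task is run by its owning process,
 * which sleeps on the RPC_TASK_QUEUED bit in __rpc_execute() and is woken
 * by the wake_up_bit() call above.
 */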

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	__rpc_add_wait_queue(q, task);

	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* Mark the task as being activated if so needed */
	rpc_set_active(task);

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
		__rpc_do_wake_up_task(queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the specified task
 */
static void rpc_wake_up_task(struct rpc_task *task)
{
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}
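
/*
 * To summarize the policy above: up to RPC_BATCH_COUNT consecutive wake-ups
 * go to the owner currently holding the queue, queue->count bounds how many
 * wake-ups the current priority level receives before the scan moves on,
 * and the do/while loop walks down the priority array (wrapping from the
 * lowest level back to the highest) so an empty level simply passes its turn.
 */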

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
			rpc_wake_up_task_queue_locked(queue, task);
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task, *next;
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}
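
/*
 * Expired tasks are woken with tk_status set to -ETIMEDOUT; for those that
 * remain, the loop tracks the nearest future expiry and re-arms the queue
 * timer with it, so one timer continues to suffice for the whole queue.
 */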

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
EXPORT_SYMBOL_GPL(rpc_exit_task);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {

		/*
		 * Execute any pending callback.
		 */
		if (task->tk_callback) {
			void (*save_callback)(struct rpc_task *);

			/*
			 * We set tk_callback to NULL before calling it,
			 * in case it sets the tk_callback field itself:
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
			rpc_wake_up_task(task);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}
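
/*
 * Note the asymmetry above: when an async task needs to sleep,
 * __rpc_execute() simply returns, freeing the rpciod worker to run other
 * tasks, and the task re-enters this function through rpc_async_schedule()
 * once it is woken. A sync task instead blocks right here on the
 * RPC_TASK_QUEUED bit.
 */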

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
	__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

struct rpc_buffer {
	size_t	len;
	char	data[];
};
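
/*
 * rpc_malloc() hands callers a pointer to the data[] member, so they see an
 * ordinary buffer while the length needed to choose the right free path is
 * stashed in the struct rpc_buffer header directly in front of it;
 * rpc_free() recovers the header with container_of().
 */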

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_client = task_setup_data->rpc_client;
	if (task->tk_client != NULL) {
		kref_get(&task->tk_client->cl_kref);
		if (task->tk_client->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
	}

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	if (task_setup_data->rpc_message != NULL) {
		task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
		task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
		task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
		/* Bind the user cred */
		rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
		if (task->tk_action == NULL)
			rpc_call_start(task);
	}

	/* starting timestamp */
	task->tk_start = jiffies;

	dprintk("RPC:       new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL)
			goto out;
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);

	task->tk_flags |= flags;
	dprintk("RPC:       allocated task %p\n", task);
out:
	return task;
}

static void rpc_free_task(struct rpc_task *task)
{
	const struct rpc_call_ops *tk_ops = task->tk_ops;
	void *calldata = task->tk_calldata;

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
	rpc_release_calldata(tk_ops, calldata);
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

void rpc_put_task(struct rpc_task *task)
{
	if (!atomic_dec_and_test(&task->tk_count))
		return;
	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}
	if (task->tk_workqueue != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(task->tk_workqueue, &task->u.tk_work);
	} else
		rpc_free_task(task);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
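
/*
 * Task lifetime is governed by tk_count, which rpc_init_task() starts at 1.
 * rpc_put_task() drops one reference and, on the final put, releases the
 * xprt slot, credentials and client before freeing the task itself; when
 * the task has a tk_workqueue, the final rpc_free_task() is deferred to
 * that workqueue rather than run in the caller's context.
 */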

static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
	BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
	dprintk("RPC: %5u release task\n", task->tk_pid);

	if (!list_empty(&task->tk_task)) {
		struct rpc_clnt *clnt = task->tk_client;
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
	}
	BUG_ON(RPC_IS_QUEUED(task));

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	/* Wake up anyone who is waiting for task completion */
	rpc_mark_complete_task(task);

	rpc_put_task(task);
}

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;

	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC:       killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	wq = create_workqueue("rpciod");
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}