1/*
2 * linux/net/sunrpc/sched.c
3 *
4 * Scheduling for synchronous and asynchronous RPC requests.
5 *
6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
7 *
8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10 */
11
12#include <linux/module.h>
13
14#include <linux/sched.h>
15#include <linux/interrupt.h>
16#include <linux/slab.h>
17#include <linux/mempool.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/spinlock.h>
21
22#include <linux/sunrpc/clnt.h>
23#include <linux/sunrpc/xprt.h>
24
25#ifdef RPC_DEBUG
26#define RPCDBG_FACILITY RPCDBG_SCHED
27#define RPC_TASK_MAGIC_ID 0xf00baa
28static int rpc_task_id;
29#endif
30
31/*
32 * RPC slabs and memory pools
33 */
34#define RPC_BUFFER_MAXSIZE (2048)
35#define RPC_BUFFER_POOLSIZE (8)
36#define RPC_TASK_POOLSIZE (8)
37static kmem_cache_t *rpc_task_slabp;
38static kmem_cache_t *rpc_buffer_slabp;
39static mempool_t *rpc_task_mempool;
40static mempool_t *rpc_buffer_mempool;
41
42static void __rpc_default_timer(struct rpc_task *task);
43static void rpciod_killall(void);
44static void rpc_free(struct rpc_task *task);
45
46static void rpc_async_schedule(void *);
47
48/*
49 * RPC tasks that create another task (e.g. for contacting the portmapper)
50 * will wait on this queue for their child's completion
51 */
52static RPC_WAITQ(childq, "childq");
53
54/*
55 * RPC tasks sit here while waiting for conditions to improve.
56 */
57static RPC_WAITQ(delay_queue, "delayq");
58
59/*
60 * All RPC tasks are linked into this list
61 */
62static LIST_HEAD(all_tasks);
63
64/*
65 * rpciod-related stuff
66 */
67static DECLARE_MUTEX(rpciod_sema);
68static unsigned int rpciod_users;
69static struct workqueue_struct *rpciod_workqueue;
70
71/*
72 * Spinlock for other critical sections of code.
73 */
74static DEFINE_SPINLOCK(rpc_sched_lock);
75
76/*
77 * Disable the timer for a given RPC task. Should be called with
78 * queue->lock and bh_disabled in order to avoid races within
79 * rpc_run_timer().
80 */
81static inline void
82__rpc_disable_timer(struct rpc_task *task)
83{
84 dprintk("RPC: %4d disabling timer\n", task->tk_pid);
85 task->tk_timeout_fn = NULL;
86 task->tk_timeout = 0;
87}
88
89/*
90 * Run a timeout function.
91 * We use the callback in order to allow __rpc_wake_up_task()
92 * and friends to disable the timer synchronously on SMP systems
93 * without calling del_timer_sync(). The latter could cause a
94 * deadlock if called while we're holding spinlocks...
95 */
96static void rpc_run_timer(struct rpc_task *task)
97{
98 void (*callback)(struct rpc_task *);
99
100 callback = task->tk_timeout_fn;
101 task->tk_timeout_fn = NULL;
102 if (callback && RPC_IS_QUEUED(task)) {
103 dprintk("RPC: %4d running timer\n", task->tk_pid);
104 callback(task);
105 }
106 smp_mb__before_clear_bit();
107 clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
108 smp_mb__after_clear_bit();
109}
110
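Editor's note: the comment above explains why timer expiry is funnelled through rpc_run_timer() rather than del_timer_sync(). As a hedged illustration only (not part of the original sched.c, and assuming the declarations from linux/sunrpc/sched.h), this sketch shows how a caller of this era's API might arm the per-task timer when putting a task to sleep; the queue, the five second timeout and the expiry action are invented, and the expiry handler simply mirrors __rpc_default_timer() below.

/* Illustrative sketch only -- not part of the original file. */
static void example_timed_out(struct rpc_task *task)
{
	/* Called from rpc_run_timer() only while the task is still queued. */
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}

static void example_wait_with_timeout(struct rpc_wait_queue *q,
				      struct rpc_task *task)
{
	task->tk_timeout = 5 * HZ;	/* expire after roughly five seconds */
	rpc_sleep_on(q, task, NULL, example_timed_out);
}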
111/*
112 * Set up a timer for the current task.
113 */
114static inline void
115__rpc_add_timer(struct rpc_task *task, rpc_action timer)
116{
117 if (!task->tk_timeout)
118 return;
119
120 dprintk("RPC: %4d setting alarm for %lu ms\n",
121 task->tk_pid, task->tk_timeout * 1000 / HZ);
122
123 if (timer)
124 task->tk_timeout_fn = timer;
125 else
126 task->tk_timeout_fn = __rpc_default_timer;
127 set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
128 mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
129}
130
131/*
132 * Delete any timer for the current task. Because we use del_timer_sync(),
133 * this function should never be called while holding queue->lock.
134 */
135static void
136rpc_delete_timer(struct rpc_task *task)
137{
138 if (RPC_IS_QUEUED(task))
139 return;
140 if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
141 del_singleshot_timer_sync(&task->tk_timer);
142 dprintk("RPC: %4d deleting timer\n", task->tk_pid);
143 }
144}
145
146/*
147 * Add new request to a priority queue.
148 */
149static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
150{
151 struct list_head *q;
152 struct rpc_task *t;
153
154 INIT_LIST_HEAD(&task->u.tk_wait.links);
155 q = &queue->tasks[task->tk_priority];
156 if (unlikely(task->tk_priority > queue->maxpriority))
157 q = &queue->tasks[queue->maxpriority];
158 list_for_each_entry(t, q, u.tk_wait.list) {
159 if (t->tk_cookie == task->tk_cookie) {
160 list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
161 return;
162 }
163 }
164 list_add_tail(&task->u.tk_wait.list, q);
165}
166
167/*
168 * Add new request to wait queue.
169 *
170 * Swapper tasks always get inserted at the head of the queue.
171 * This should avoid many nasty memory deadlocks and hopefully
172 * improve overall performance.
173 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
174 */
175static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
176{
177 BUG_ON (RPC_IS_QUEUED(task));
178
179 if (RPC_IS_PRIORITY(queue))
180 __rpc_add_wait_queue_priority(queue, task);
181 else if (RPC_IS_SWAPPER(task))
182 list_add(&task->u.tk_wait.list, &queue->tasks[0]);
183 else
184 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
185 task->u.tk_wait.rpc_waitq = queue;
186 rpc_set_queued(task);
187
188 dprintk("RPC: %4d added to queue %p \"%s\"\n",
189 task->tk_pid, queue, rpc_qname(queue));
190}
191
192/*
193 * Remove request from a priority queue.
194 */
195static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
196{
197 struct rpc_task *t;
198
199 if (!list_empty(&task->u.tk_wait.links)) {
200 t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
201 list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
202 list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
203 }
204 list_del(&task->u.tk_wait.list);
205}
206
207/*
208 * Remove request from queue.
209 * Note: must be called with spin lock held.
210 */
211static void __rpc_remove_wait_queue(struct rpc_task *task)
212{
213 struct rpc_wait_queue *queue;
214 queue = task->u.tk_wait.rpc_waitq;
215
216 if (RPC_IS_PRIORITY(queue))
217 __rpc_remove_wait_queue_priority(task);
218 else
219 list_del(&task->u.tk_wait.list);
220 dprintk("RPC: %4d removed from queue %p \"%s\"\n",
221 task->tk_pid, queue, rpc_qname(queue));
222}
223
224static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
225{
226 queue->priority = priority;
227 queue->count = 1 << (priority * 2);
228}
229
230static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
231{
232 queue->cookie = cookie;
233 queue->nr = RPC_BATCH_COUNT;
234}
235
236static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
237{
238 rpc_set_waitqueue_priority(queue, queue->maxpriority);
239 rpc_set_waitqueue_cookie(queue, 0);
240}
241
242static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
243{
244 int i;
245
246 spin_lock_init(&queue->lock);
247 for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
248 INIT_LIST_HEAD(&queue->tasks[i]);
249 queue->maxpriority = maxprio;
250 rpc_reset_waitqueue_priority(queue);
251#ifdef RPC_DEBUG
252 queue->name = qname;
253#endif
254}
255
256void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
257{
258 __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
259}
260
261void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
262{
263 __rpc_init_priority_wait_queue(queue, qname, 0);
264}
265EXPORT_SYMBOL(rpc_init_wait_queue);
266
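Editor's note: as a rough usage sketch (not from the original file), a subsystem that wants its own wait queue embeds a struct rpc_wait_queue, initializes it once with rpc_init_wait_queue() or rpc_init_priority_wait_queue(), parks tasks on it with rpc_sleep_on(), and later resumes them with rpc_wake_up_next() or rpc_wake_up(). The wrapper structure, queue name and resume action below are assumptions made for illustration.

/* Illustrative sketch only -- assumes declarations from linux/sunrpc/sched.h. */
struct example_transport {
	struct rpc_wait_queue	pending;	/* tasks waiting on this transport */
};

static void example_transport_init(struct example_transport *xp)
{
	rpc_init_wait_queue(&xp->pending, "example-pending");
}

static void example_transport_wait(struct example_transport *xp,
				   struct rpc_task *task, rpc_action resume)
{
	/* Someone later calls rpc_wake_up_next(&xp->pending) to resume it. */
	rpc_sleep_on(&xp->pending, task, resume, NULL);
}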
267/*
268 * Make an RPC task runnable.
269 *
270 * Note: If the task is ASYNC, this must be called with
271 * the spinlock held to protect the wait queue operation.
272 */
273static void rpc_make_runnable(struct rpc_task *task)
274{
275 int do_ret;
276
277 BUG_ON(task->tk_timeout_fn);
278 do_ret = rpc_test_and_set_running(task);
279 rpc_clear_queued(task);
280 if (do_ret)
281 return;
282 if (RPC_IS_ASYNC(task)) {
283 int status;
284
285 INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
286 status = queue_work(task->tk_workqueue, &task->u.tk_work);
287 if (status < 0) {
288 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
289 task->tk_status = status;
290 return;
291 }
292 } else
293 wake_up(&task->u.tk_wait.waitq);
294}
295
296/*
297 * Place a newly initialized task on the workqueue.
298 */
299static inline void
300rpc_schedule_run(struct rpc_task *task)
301{
302 /* Don't run a child twice! */
303 if (RPC_IS_ACTIVATED(task))
304 return;
305 task->tk_active = 1;
306 rpc_make_runnable(task);
307}
308
309/*
310 * Prepare for sleeping on a wait queue.
311 * By always appending tasks to the list we ensure FIFO behavior.
312 * NB: An RPC task will only receive interrupt-driven events as long
313 * as it's on a wait queue.
314 */
315static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
316 rpc_action action, rpc_action timer)
317{
318 dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
319 rpc_qname(q), jiffies);
320
321 if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
322 printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
323 return;
324 }
325
326 /* Mark the task as being activated if so needed */
327 if (!RPC_IS_ACTIVATED(task))
328 task->tk_active = 1;
329
330 __rpc_add_wait_queue(q, task);
331
332 BUG_ON(task->tk_callback != NULL);
333 task->tk_callback = action;
334 __rpc_add_timer(task, timer);
335}
336
337void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
338 rpc_action action, rpc_action timer)
339{
340 /*
341 * Protect the queue operations.
342 */
343 spin_lock_bh(&q->lock);
344 __rpc_sleep_on(q, task, action, timer);
345 spin_unlock_bh(&q->lock);
346}
347
348/**
349 * __rpc_do_wake_up_task - wake up a single rpc_task
350 * @task: task to be woken up
351 *
352 * Caller must hold queue->lock, and have cleared the task queued flag.
353 */
354static void __rpc_do_wake_up_task(struct rpc_task *task)
355{
356 dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);
357
358#ifdef RPC_DEBUG
359 BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
360#endif
361 /* Has the task been executed yet? If not, we cannot wake it up! */
362 if (!RPC_IS_ACTIVATED(task)) {
363 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
364 return;
365 }
366
367 __rpc_disable_timer(task);
368 __rpc_remove_wait_queue(task);
369
370 rpc_make_runnable(task);
371
372 dprintk("RPC: __rpc_wake_up_task done\n");
373}
374
375/*
376 * Wake up the specified task
377 */
378static void __rpc_wake_up_task(struct rpc_task *task)
379{
380 if (rpc_start_wakeup(task)) {
381 if (RPC_IS_QUEUED(task))
382 __rpc_do_wake_up_task(task);
383 rpc_finish_wakeup(task);
384 }
385}
386
387/*
388 * Default timeout handler if none specified by user
389 */
390static void
391__rpc_default_timer(struct rpc_task *task)
392{
393 dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
394 task->tk_status = -ETIMEDOUT;
395 rpc_wake_up_task(task);
396}
397
398/*
399 * Wake up the specified task
400 */
401void rpc_wake_up_task(struct rpc_task *task)
402{
403 if (rpc_start_wakeup(task)) {
404 if (RPC_IS_QUEUED(task)) {
405 struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
406
407 spin_lock_bh(&queue->lock);
408 __rpc_do_wake_up_task(task);
409 spin_unlock_bh(&queue->lock);
410 }
411 rpc_finish_wakeup(task);
412 }
413}
414
415/*
416 * Wake up the next task on a priority queue.
417 */
418static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
419{
420 struct list_head *q;
421 struct rpc_task *task;
422
423 /*
424 * Service a batch of tasks from a single cookie.
425 */
426 q = &queue->tasks[queue->priority];
427 if (!list_empty(q)) {
428 task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
429 if (queue->cookie == task->tk_cookie) {
430 if (--queue->nr)
431 goto out;
432 list_move_tail(&task->u.tk_wait.list, q);
433 }
434 /*
435 * Check if we need to switch queues.
436 */
437 if (--queue->count)
438 goto new_cookie;
439 }
440
441 /*
442 * Service the next queue.
443 */
444 do {
445 if (q == &queue->tasks[0])
446 q = &queue->tasks[queue->maxpriority];
447 else
448 q = q - 1;
449 if (!list_empty(q)) {
450 task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
451 goto new_queue;
452 }
453 } while (q != &queue->tasks[queue->priority]);
454
455 rpc_reset_waitqueue_priority(queue);
456 return NULL;
457
458new_queue:
459 rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
460new_cookie:
461 rpc_set_waitqueue_cookie(queue, task->tk_cookie);
462out:
463 __rpc_wake_up_task(task);
464 return task;
465}
466
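Editor's note: the batching above is driven by two counters set earlier in this file: rpc_set_waitqueue_cookie() grants RPC_BATCH_COUNT consecutive wake-ups to tasks sharing a cookie, and rpc_set_waitqueue_priority() sets queue->count to 1 << (priority * 2), limiting how many such batches run at one priority level before the scan moves on. The sketch below is not part of the file; it only illustrates how related tasks might be queued with a common cookie so they are serviced together, and the owner_cookie parameter is an invented name.

/* Illustrative sketch only -- tasks sharing tk_cookie are woken in batches. */
static void example_queue_for_owner(struct rpc_wait_queue *q,
				    struct rpc_task *task,
				    unsigned long owner_cookie)
{
	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = owner_cookie;		/* e.g. (unsigned long)current */
	rpc_sleep_on(q, task, NULL, NULL);
}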
467/*
468 * Wake up the next task on the wait queue.
469 */
470struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
471{
472 struct rpc_task *task = NULL;
473
474 dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
475 spin_lock_bh(&queue->lock);
476 if (RPC_IS_PRIORITY(queue))
477 task = __rpc_wake_up_next_priority(queue);
478 else {
479 task_for_first(task, &queue->tasks[0])
480 __rpc_wake_up_task(task);
481 }
482 spin_unlock_bh(&queue->lock);
483
484 return task;
485}
486
487/**
488 * rpc_wake_up - wake up all rpc_tasks
489 * @queue: rpc_wait_queue on which the tasks are sleeping
490 *
491 * Grabs queue->lock
492 */
493void rpc_wake_up(struct rpc_wait_queue *queue)
494{
495 struct rpc_task *task;
496
497 struct list_head *head;
498 spin_lock_bh(&queue->lock);
499 head = &queue->tasks[queue->maxpriority];
500 for (;;) {
501 while (!list_empty(head)) {
502 task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
503 __rpc_wake_up_task(task);
504 }
505 if (head == &queue->tasks[0])
506 break;
507 head--;
508 }
509 spin_unlock_bh(&queue->lock);
510}
511
512/**
513 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
514 * @queue: rpc_wait_queue on which the tasks are sleeping
515 * @status: status value to set
516 *
517 * Grabs queue->lock
518 */
519void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
520{
521 struct list_head *head;
522 struct rpc_task *task;
523
524 spin_lock_bh(&queue->lock);
525 head = &queue->tasks[queue->maxpriority];
526 for (;;) {
527 while (!list_empty(head)) {
528 task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
529 task->tk_status = status;
530 __rpc_wake_up_task(task);
531 }
532 if (head == &queue->tasks[0])
533 break;
534 head--;
535 }
536 spin_unlock_bh(&queue->lock);
537}
538
539/*
540 * Run a task at a later time
541 */
542static void __rpc_atrun(struct rpc_task *);
543void
544rpc_delay(struct rpc_task *task, unsigned long delay)
545{
546 task->tk_timeout = delay;
547 rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
548}
549
550static void
551__rpc_atrun(struct rpc_task *task)
552{
553 task->tk_status = 0;
554 rpc_wake_up_task(task);
555}
556
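Editor's note: rpc_delay() is how a task backs off: it arms the task's timer and parks it on the global delay_queue, and __rpc_atrun() clears the status and wakes it when the timer fires. The following sketch of a retry-after-delay pattern is illustrative only; the action names and the -EAGAIN condition are invented for the example.

/* Illustrative sketch only -- back off and retry from a tk_action handler. */
static void example_call_again(struct rpc_task *task)
{
	/* Resume the real state machine here after the delay expires. */
	task->tk_action = NULL;
}

static void example_handle_backoff(struct rpc_task *task)
{
	if (task->tk_status == -EAGAIN) {
		task->tk_status = 0;
		task->tk_action = example_call_again;	/* runs after the delay */
		rpc_delay(task, 2 * HZ);		/* sleep roughly two seconds */
	}
}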
557/*
558 * Helper that calls task->tk_exit if it exists and then returns
559 * true if we should exit __rpc_execute.
560 */
561static inline int __rpc_do_exit(struct rpc_task *task)
562{
563 if (task->tk_exit != NULL) {
564 lock_kernel();
565 task->tk_exit(task);
566 unlock_kernel();
567 /* If tk_action is non-null, we should restart the call */
568 if (task->tk_action != NULL) {
569 if (!RPC_ASSASSINATED(task)) {
570 /* Release RPC slot and buffer memory */
571 xprt_release(task);
572 rpc_free(task);
573 return 0;
574 }
575 printk(KERN_ERR "RPC: dead task tried to walk away.\n");
576 }
577 }
578 return 1;
579}
580
581/*
582 * This is the RPC `scheduler' (or rather, the finite state machine).
583 */
584static int __rpc_execute(struct rpc_task *task)
585{
586 int status = 0;
587
588 dprintk("RPC: %4d rpc_execute flgs %x\n",
589 task->tk_pid, task->tk_flags);
590
591 BUG_ON(RPC_IS_QUEUED(task));
592
593 for (;;) {
594 /*
595 * Garbage collection of pending timers...
596 */
597 rpc_delete_timer(task);
598
599 /*
600 * Execute any pending callback.
601 */
602 if (RPC_DO_CALLBACK(task)) {
603 /* Define a callback save pointer */
604 void (*save_callback)(struct rpc_task *);
605
606 /*
607 * If a callback exists, save it, reset it,
608 * call it.
609 * The save is needed to stop from resetting
610 * another callback set within the callback handler
611 * - Dave
612 */
613 save_callback=task->tk_callback;
614 task->tk_callback=NULL;
615 lock_kernel();
616 save_callback(task);
617 unlock_kernel();
618 }
619
620 /*
621 * Perform the next FSM step.
622 * tk_action may be NULL when the task has been killed
623 * by someone else.
624 */
625 if (!RPC_IS_QUEUED(task)) {
626 if (task->tk_action != NULL) {
627 lock_kernel();
628 task->tk_action(task);
629 unlock_kernel();
630 } else if (__rpc_do_exit(task))
631 break;
632 }
633
634 /*
635 * Lockless check for whether task is sleeping or not.
636 */
637 if (!RPC_IS_QUEUED(task))
638 continue;
639 rpc_clear_running(task);
640 if (RPC_IS_ASYNC(task)) {
641 /* Careful! we may have raced... */
642 if (RPC_IS_QUEUED(task))
643 return 0;
644 if (rpc_test_and_set_running(task))
645 return 0;
646 continue;
647 }
648
649 /* sync task: sleep here */
650 dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
651 if (RPC_TASK_UNINTERRUPTIBLE(task)) {
652 __wait_event(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task));
653 } else {
654 __wait_event_interruptible(task->u.tk_wait.waitq, !RPC_IS_QUEUED(task), status);
655 /*
656 * When a sync task receives a signal, it exits with
657 * -ERESTARTSYS. In order to catch any callbacks that
658 * clean up after sleeping on some queue, we don't
659 * break the loop here, but go around once more.
660 */
661 if (status == -ERESTARTSYS) {
662 dprintk("RPC: %4d got signal\n", task->tk_pid);
663 task->tk_flags |= RPC_TASK_KILLED;
664 rpc_exit(task, -ERESTARTSYS);
665 rpc_wake_up_task(task);
666 }
667 }
668 rpc_set_running(task);
669 dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
670 }
671
672 dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
673 status = task->tk_status;
674
675 /* Release all resources associated with the task */
676 rpc_release_task(task);
677 return status;
678}
679
680/*
681 * User-visible entry point to the scheduler.
682 *
683 * This may be called recursively if e.g. an async NFS task updates
684 * the attributes and finds that dirty pages must be flushed.
685 * NOTE: Upon exit of this function the task is guaranteed to be
686 * released. In particular note that tk_release() will have
687 * been called, so your task memory may have been freed.
688 */
689int
690rpc_execute(struct rpc_task *task)
691{
692 BUG_ON(task->tk_active);
693
694 task->tk_active = 1;
695 rpc_set_running(task);
696 return __rpc_execute(task);
697}
698
699static void rpc_async_schedule(void *arg)
700{
701 __rpc_execute((struct rpc_task *)arg);
702}
703
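Editor's note: rpc_execute() together with rpc_new_task() (defined further down) is the public way to run a one-off request: a synchronous caller blocks inside __rpc_execute() until the task completes, while an RPC_TASK_ASYNC task is handed to the rpciod workqueue via rpc_async_schedule(). In practice most callers go through the rpc_call_* helpers in net/sunrpc/clnt.c; the minimal sketch below is not from the original file, and the function name and exit callback are placeholders.

/* Illustrative sketch only -- create and run a one-shot synchronous task. */
static int example_run_sync(struct rpc_clnt *clnt, rpc_action exit_callback)
{
	struct rpc_task *task;

	task = rpc_new_task(clnt, exit_callback, 0);
	if (task == NULL)
		return -ENOMEM;
	/* tk_action would normally be set up here (e.g. by rpc_call_setup()). */
	return rpc_execute(task);
}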
704/*
705 * Allocate memory for RPC purposes.
706 *
707 * We try to ensure that some NFS reads and writes can always proceed
708 * by using a mempool when allocating 'small' buffers.
709 * In order to avoid memory starvation triggering more writebacks of
710 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
711 */
712void *
713rpc_malloc(struct rpc_task *task, size_t size)
714{
715 int gfp;
716
717 if (task->tk_flags & RPC_TASK_SWAPPER)
718 gfp = GFP_ATOMIC;
719 else
720 gfp = GFP_NOFS;
721
722 if (size > RPC_BUFFER_MAXSIZE) {
723 task->tk_buffer = kmalloc(size, gfp);
724 if (task->tk_buffer)
725 task->tk_bufsize = size;
726 } else {
727 task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
728 if (task->tk_buffer)
729 task->tk_bufsize = RPC_BUFFER_MAXSIZE;
730 }
731 return task->tk_buffer;
732}
733
734static void
735rpc_free(struct rpc_task *task)
736{
737 if (task->tk_buffer) {
738 if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
739 mempool_free(task->tk_buffer, rpc_buffer_mempool);
740 else
741 kfree(task->tk_buffer);
742 task->tk_buffer = NULL;
743 task->tk_bufsize = 0;
744 }
745}
746
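Editor's note: rpc_malloc() sizes the request buffer as described above: anything up to RPC_BUFFER_MAXSIZE (2048 bytes) comes from the guaranteed rpc_buffer_mempool, larger requests fall back to kmalloc(), and swapper tasks allocate with GFP_ATOMIC so they never wait for memory. The sketch below is a hedged illustration of a caller reserving space for a call and reply in one buffer; the function name and size parameters are assumptions.

/* Illustrative sketch only -- reserve send+receive buffer space for a call. */
static int example_reserve_buffer(struct rpc_task *task,
				  size_t sendsize, size_t recvsize)
{
	if (rpc_malloc(task, sendsize + recvsize) == NULL)
		return -ENOMEM;		/* task->tk_buffer stays NULL on failure */
	return 0;			/* rpc_free() releases it later */
}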
747/*
748 * Creation and deletion of RPC task structures
749 */
750void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action callback, int flags)
751{
752 memset(task, 0, sizeof(*task));
753 init_timer(&task->tk_timer);
754 task->tk_timer.data = (unsigned long) task;
755 task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
756 task->tk_client = clnt;
757 task->tk_flags = flags;
758 task->tk_exit = callback;
759
760 /* Initialize retry counters */
761 task->tk_garb_retry = 2;
762 task->tk_cred_retry = 2;
763
764 task->tk_priority = RPC_PRIORITY_NORMAL;
765 task->tk_cookie = (unsigned long)current;
766
767 /* Initialize workqueue for async tasks */
768 task->tk_workqueue = rpciod_workqueue;
769 if (!RPC_IS_ASYNC(task))
770 init_waitqueue_head(&task->u.tk_wait.waitq);
771
772 if (clnt) {
773 atomic_inc(&clnt->cl_users);
774 if (clnt->cl_softrtry)
775 task->tk_flags |= RPC_TASK_SOFT;
776 if (!clnt->cl_intr)
777 task->tk_flags |= RPC_TASK_NOINTR;
778 }
779
780#ifdef RPC_DEBUG
781 task->tk_magic = RPC_TASK_MAGIC_ID;
782 task->tk_pid = rpc_task_id++;
783#endif
784 /* Add to global list of all tasks */
785 spin_lock(&rpc_sched_lock);
786 list_add_tail(&task->tk_task, &all_tasks);
787 spin_unlock(&rpc_sched_lock);
788
789 dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
790 current->pid);
791}
792
793static struct rpc_task *
794rpc_alloc_task(void)
795{
796 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
797}
798
799static void
800rpc_default_free_task(struct rpc_task *task)
801{
802 dprintk("RPC: %4d freeing task\n", task->tk_pid);
803 mempool_free(task, rpc_task_mempool);
804}
805
806/*
807 * Create a new task for the specified client. We have to
808 * clean up after an allocation failure, as the client may
809 * have specified "oneshot".
810 */
811struct rpc_task *
812rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
813{
814 struct rpc_task *task;
815
816 task = rpc_alloc_task();
817 if (!task)
818 goto cleanup;
819
820 rpc_init_task(task, clnt, callback, flags);
821
822 /* Replace tk_release */
823 task->tk_release = rpc_default_free_task;
824
825 dprintk("RPC: %4d allocated task\n", task->tk_pid);
826 task->tk_flags |= RPC_TASK_DYNAMIC;
827out:
828 return task;
829
830cleanup:
831 /* Check whether to release the client */
832 if (clnt) {
833 printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
834 atomic_read(&clnt->cl_users), clnt->cl_oneshot);
835 atomic_inc(&clnt->cl_users); /* pretend we were used ... */
836 rpc_release_client(clnt);
837 }
838 goto out;
839}
840
841void rpc_release_task(struct rpc_task *task)
842{
843 dprintk("RPC: %4d release task\n", task->tk_pid);
844
845#ifdef RPC_DEBUG
846 BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
847#endif
848
849 /* Remove from global task list */
850 spin_lock(&rpc_sched_lock);
851 list_del(&task->tk_task);
852 spin_unlock(&rpc_sched_lock);
853
854 BUG_ON (RPC_IS_QUEUED(task));
855 task->tk_active = 0;
856
857 /* Synchronously delete any running timer */
858 rpc_delete_timer(task);
859
860 /* Release resources */
861 if (task->tk_rqstp)
862 xprt_release(task);
863 if (task->tk_msg.rpc_cred)
864 rpcauth_unbindcred(task);
865 rpc_free(task);
866 if (task->tk_client) {
867 rpc_release_client(task->tk_client);
868 task->tk_client = NULL;
869 }
870
871#ifdef RPC_DEBUG
872 task->tk_magic = 0;
873#endif
874 if (task->tk_release)
875 task->tk_release(task);
876}
877
878/**
879 * rpc_find_parent - find the parent of a child task.
880 * @child: child task
881 *
882 * Checks that the parent task is still sleeping on the
883 * queue 'childq'. If so returns a pointer to the parent.
884 * Upon failure returns NULL.
885 *
886 * Caller must hold childq.lock
887 */
888static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
889{
890 struct rpc_task *task, *parent;
891 struct list_head *le;
892
893 parent = (struct rpc_task *) child->tk_calldata;
894 task_for_each(task, le, &childq.tasks[0])
895 if (task == parent)
896 return parent;
897
898 return NULL;
899}
900
901static void rpc_child_exit(struct rpc_task *child)
902{
903 struct rpc_task *parent;
904
905 spin_lock_bh(&childq.lock);
906 if ((parent = rpc_find_parent(child)) != NULL) {
907 parent->tk_status = child->tk_status;
908 __rpc_wake_up_task(parent);
909 }
910 spin_unlock_bh(&childq.lock);
911}
912
913/*
914 * Note: rpc_new_task releases the client after a failure.
915 */
916struct rpc_task *
917rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
918{
919 struct rpc_task *task;
920
921 task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
922 if (!task)
923 goto fail;
924 task->tk_exit = rpc_child_exit;
925 task->tk_calldata = parent;
926 return task;
927
928fail:
929 parent->tk_status = -ENOMEM;
930 return NULL;
931}
932
933void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
934{
935 spin_lock_bh(&childq.lock);
936 /* N.B. Is it possible for the child to have already finished? */
937 __rpc_sleep_on(&childq, task, func, NULL);
938 rpc_schedule_run(child);
939 spin_unlock_bh(&childq.lock);
940}
941
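Editor's note: the child-task machinery ties together rpc_new_child(), rpc_run_child() and the childq declared near the top of this file: the parent sleeps on childq while the child runs, and rpc_child_exit() copies the child's status back and wakes the parent. The calling pattern sketched below is illustrative only; the action names are invented and the child's tk_action setup is left as a comment.

/* Illustrative sketch only -- spawn a helper task and wait for its result. */
static void example_child_done(struct rpc_task *parent)
{
	/* Runs once the child has finished; its status is in parent->tk_status. */
	parent->tk_action = NULL;
}

static void example_spawn_child(struct rpc_task *parent, struct rpc_clnt *clnt)
{
	struct rpc_task *child;

	child = rpc_new_child(clnt, parent);
	if (child == NULL)
		return;		/* rpc_new_child() already set parent->tk_status */
	/* child->tk_action would be set here, e.g. to a portmapper lookup step */
	rpc_run_child(parent, child, example_child_done);
}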
942/*
943 * Kill all tasks for the given client.
944 * XXX: kill their descendants as well?
945 */
946void rpc_killall_tasks(struct rpc_clnt *clnt)
947{
948 struct rpc_task *rovr;
949 struct list_head *le;
950
951 dprintk("RPC: killing all tasks for client %p\n", clnt);
952
953 /*
954 * Spin lock all_tasks to prevent changes...
955 */
956 spin_lock(&rpc_sched_lock);
957 alltask_for_each(rovr, le, &all_tasks) {
958 if (! RPC_IS_ACTIVATED(rovr))
959 continue;
960 if (!clnt || rovr->tk_client == clnt) {
961 rovr->tk_flags |= RPC_TASK_KILLED;
962 rpc_exit(rovr, -EIO);
963 rpc_wake_up_task(rovr);
964 }
965 }
966 spin_unlock(&rpc_sched_lock);
967}
968
969static DECLARE_MUTEX_LOCKED(rpciod_running);
970
971static void rpciod_killall(void)
972{
973 unsigned long flags;
974
975 while (!list_empty(&all_tasks)) {
976 clear_thread_flag(TIF_SIGPENDING);
977 rpc_killall_tasks(NULL);
978 flush_workqueue(rpciod_workqueue);
979 if (!list_empty(&all_tasks)) {
980 dprintk("rpciod_killall: waiting for tasks to exit\n");
981 yield();
982 }
983 }
984
985 spin_lock_irqsave(&current->sighand->siglock, flags);
986 recalc_sigpending();
987 spin_unlock_irqrestore(&current->sighand->siglock, flags);
988}
989
990/*
991 * Start up the rpciod process if it's not already running.
992 */
993int
994rpciod_up(void)
995{
996 struct workqueue_struct *wq;
997 int error = 0;
998
999 down(&rpciod_sema);
1000 dprintk("rpciod_up: users %d\n", rpciod_users);
1001 rpciod_users++;
1002 if (rpciod_workqueue)
1003 goto out;
1004 /*
1005 * If there's no workqueue yet, we should be the first user.
1006 */
1007 if (rpciod_users > 1)
1008 printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
1009 /*
1010 * Create the rpciod thread and wait for it to start.
1011 */
1012 error = -ENOMEM;
1013 wq = create_workqueue("rpciod");
1014 if (wq == NULL) {
1015 printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
1016 rpciod_users--;
1017 goto out;
1018 }
1019 rpciod_workqueue = wq;
1020 error = 0;
1021out:
1022 up(&rpciod_sema);
1023 return error;
1024}
1025
1026void
1027rpciod_down(void)
1028{
1029 down(&rpciod_sema);
1030 dprintk("rpciod_down sema %d\n", rpciod_users);
1031 if (rpciod_users) {
1032 if (--rpciod_users)
1033 goto out;
1034 } else
1035 printk(KERN_WARNING "rpciod_down: no users??\n");
1036
1037 if (!rpciod_workqueue) {
1038 dprintk("rpciod_down: Nothing to do!\n");
1039 goto out;
1040 }
1041 rpciod_killall();
1042
1043 destroy_workqueue(rpciod_workqueue);
1044 rpciod_workqueue = NULL;
1045 out:
1046 up(&rpciod_sema);
1047}
1048
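Editor's note: rpciod_up() and rpciod_down() form a refcounted pair: the first caller creates the shared "rpciod" workqueue, later callers only bump rpciod_users, and the final rpciod_down() kills any remaining tasks and destroys the workqueue. The sketch below shows the balanced usage a client is expected to follow; it is illustrative only and the function names are invented.

/* Illustrative sketch only -- take and drop a reference on rpciod. */
static int example_client_start(void)
{
	int error;

	error = rpciod_up();		/* creates the workqueue on first use */
	if (error < 0)
		return error;
	/* ... create an rpc_clnt and issue requests ... */
	return 0;
}

static void example_client_stop(void)
{
	rpciod_down();			/* the last caller tears rpciod down */
}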
1049#ifdef RPC_DEBUG
1050void rpc_show_tasks(void)
1051{
1052 struct list_head *le;
1053 struct rpc_task *t;
1054
1055 spin_lock(&rpc_sched_lock);
1056 if (list_empty(&all_tasks)) {
1057 spin_unlock(&rpc_sched_lock);
1058 return;
1059 }
1060 printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
1061 "-rpcwait -action- --exit--\n");
1062 alltask_for_each(t, le, &all_tasks) {
1063 const char *rpc_waitq = "none";
1064
1065 if (RPC_IS_QUEUED(t))
1066 rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
1067
1068 printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
1069 t->tk_pid,
1070 (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
1071 t->tk_flags, t->tk_status,
1072 t->tk_client,
1073 (t->tk_client ? t->tk_client->cl_prog : 0),
1074 t->tk_rqstp, t->tk_timeout,
1075 rpc_waitq,
1076 t->tk_action, t->tk_exit);
1077 }
1078 spin_unlock(&rpc_sched_lock);
1079}
1080#endif
1081
1082void
1083rpc_destroy_mempool(void)
1084{
1085 if (rpc_buffer_mempool)
1086 mempool_destroy(rpc_buffer_mempool);
1087 if (rpc_task_mempool)
1088 mempool_destroy(rpc_task_mempool);
1089 if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
1090 printk(KERN_INFO "rpc_task: not all structures were freed\n");
1091 if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
1092 printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
1093}
1094
1095int
1096rpc_init_mempool(void)
1097{
1098 rpc_task_slabp = kmem_cache_create("rpc_tasks",
1099 sizeof(struct rpc_task),
1100 0, SLAB_HWCACHE_ALIGN,
1101 NULL, NULL);
1102 if (!rpc_task_slabp)
1103 goto err_nomem;
1104 rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1105 RPC_BUFFER_MAXSIZE,
1106 0, SLAB_HWCACHE_ALIGN,
1107 NULL, NULL);
1108 if (!rpc_buffer_slabp)
1109 goto err_nomem;
1110 rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
1111 mempool_alloc_slab,
1112 mempool_free_slab,
1113 rpc_task_slabp);
1114 if (!rpc_task_mempool)
1115 goto err_nomem;
1116 rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
1117 mempool_alloc_slab,
1118 mempool_free_slab,
1119 rpc_buffer_slabp);
1120 if (!rpc_buffer_mempool)
1121 goto err_nomem;
1122 return 0;
1123err_nomem:
1124 rpc_destroy_mempool();
1125 return -ENOMEM;
1126}