/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct timer_list *t);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (task->tk_timeout == 0)
                return;
        dprintk("RPC: %5u disabling timer\n", task->tk_pid);
        task->tk_timeout = 0;
        list_del(&task->u.tk_wait.timer_list);
        if (list_empty(&queue->timer_list.list))
                del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
        queue->timer_list.expires = expires;
        mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %5u setting alarm for %u ms\n",
                        task->tk_pid, jiffies_to_msecs(task->tk_timeout));

        task->u.tk_wait.expires = jiffies + task->tk_timeout;
        if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
                rpc_set_queue_timer(queue, task->u.tk_wait.expires);
        list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

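/*
 * Set the current priority level of a wait queue. queue->nr is the
 * number of tasks that may be dequeued from this level before the
 * scheduler moves on, so higher levels are served in larger batches.
 */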
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        if (queue->priority != priority) {
                queue->priority = priority;
                queue->nr = 1U << priority;
        }
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
        struct rpc_task *t;

        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_owner == task->tk_owner) {
                        list_add_tail(&task->u.tk_wait.links,
                                        &t->u.tk_wait.links);
                        /* Cache the queue head in task->u.tk_wait.list */
                        task->u.tk_wait.list.next = q;
                        task->u.tk_wait.list.prev = NULL;
                        return;
                }
        }
        INIT_LIST_HEAD(&task->u.tk_wait.links);
        list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
        struct list_head *q;
        struct rpc_task *t;

        if (task->u.tk_wait.list.prev == NULL) {
                list_del(&task->u.tk_wait.links);
                return;
        }
        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_first_entry(&task->u.tk_wait.links,
                                struct rpc_task,
                                u.tk_wait.links);
                /* Assume __rpc_list_enqueue_task() cached the queue head */
                q = t->u.tk_wait.list.next;
                list_add_tail(&t->u.tk_wait.list, q);
                list_del(&task->u.tk_wait.links);
        }
        list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
                struct rpc_task *task,
                unsigned char queue_priority)
{
        if (unlikely(queue_priority > queue->maxpriority))
                queue_priority = queue->maxpriority;
        __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
                struct rpc_task *task,
                unsigned char queue_priority)
{
        WARN_ON_ONCE(RPC_IS_QUEUED(task));
        if (RPC_IS_QUEUED(task))
                return;

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task, queue_priority);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->tk_waitqueue = queue;
        queue->qlen++;
        /* barrier matches the read in rpc_wake_up_task_queue_locked() */
        smp_wmb();
        rpc_set_queued(task);

        dprintk("RPC: %5u added to queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        __rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        __rpc_disable_timer(queue, task);
        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        else
                list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %5u removed from queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = nr_queues - 1;
        rpc_reset_waitqueue_priority(queue);
        queue->qlen = 0;
        timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
        INIT_LIST_HEAD(&queue->timer_list.list);
        rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
        del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

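/*
 * Bit-wait callback used when sleeping on a task's tk_runstate bits:
 * schedule in a freezer-friendly way, and return -ERESTARTSYS if a
 * signal permitted by @mode (typically TASK_KILLABLE) is pending.
 */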
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
        freezable_schedule_unsafe();
        if (signal_pending_state(mode, current))
                return -ERESTARTSYS;
        return 0;
}

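/*
 * On debug and tracing builds, give each task a unique tk_pid from a
 * global counter so that dprintk() and tracepoint output can be
 * correlated per task.
 */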
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
        static atomic_t rpc_pid;

        task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

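/*
 * Mark a task as ACTIVE: from this point on it may be waited on via
 * rpc_wait_for_completion_task() and must eventually be terminated by
 * rpc_complete_task().
 */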
static void rpc_set_active(struct rpc_task *task)
{
        rpc_task_set_debuginfo(task);
        set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
        void *m = &task->tk_runstate;
        wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
        struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
        unsigned long flags;
        int ret;

        trace_rpc_task_complete(task, NULL);

        spin_lock_irqsave(&wq->lock, flags);
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        ret = atomic_dec_and_test(&task->tk_count);
        if (waitqueue_active(wq))
                __wake_up_locked_key(wq, TASK_NORMAL, &k);
        spin_unlock_irqrestore(&wq->lock, flags);
        return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
        if (action == NULL)
                action = rpc_wait_bit_killable;
        return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
                struct rpc_task *task)
{
        bool need_wakeup = !rpc_test_and_set_running(task);

        rpc_clear_queued(task);
        if (!need_wakeup)
                return;
        if (RPC_IS_ASYNC(task)) {
                INIT_WORK(&task->u.tk_work, rpc_async_schedule);
                queue_work(wq, &task->u.tk_work);
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
                struct rpc_task *task,
                rpc_action action,
                unsigned char queue_priority)
{
        dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
                        task->tk_pid, rpc_qname(q), jiffies);

        trace_rpc_task_sleep(task, q);

        __rpc_add_wait_queue(q, task, queue_priority);

        WARN_ON_ONCE(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                rpc_action action)
{
        /* We shouldn't ever put an inactive task to sleep */
        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
        if (!RPC_IS_ACTIVATED(task)) {
                task->tk_status = -EIO;
                rpc_put_task_async(task);
                return;
        }

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, action, task->tk_priority);
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
                rpc_action action, int priority)
{
        /* We shouldn't ever put an inactive task to sleep */
        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
        if (!RPC_IS_ACTIVATED(task)) {
                task->tk_status = -EIO;
                rpc_put_task_async(task);
                return;
        }

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue,
                struct rpc_task *task)
{
        dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
                        task->tk_pid, jiffies);

        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        trace_rpc_task_wakeup(task, queue);

        __rpc_remove_wait_queue(queue, task);

        rpc_make_runnable(wq, task);

        dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue, struct rpc_task *task,
                bool (*action)(struct rpc_task *, void *), void *data)
{
        if (RPC_IS_QUEUED(task)) {
                smp_rmb();
                if (task->tk_waitqueue == queue) {
                        if (action == NULL || action(task, data)) {
                                __rpc_do_wake_up_task_on_wq(wq, queue, task);
                                return task;
                        }
                }
        }
        return NULL;
}

static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue, struct rpc_task *task)
{
        rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue,
                struct rpc_task *task)
{
        if (!RPC_IS_QUEUED(task))
                return;
        spin_lock_bh(&queue->lock);
        rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
        spin_unlock_bh(&queue->lock);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (!RPC_IS_QUEUED(task))
                return;
        spin_lock_bh(&queue->lock);
        rpc_wake_up_task_queue_locked(queue, task);
        spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
        task->tk_status = *(int *)status;
        return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
                struct rpc_task *task, int status)
{
        rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
                        task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
                struct rpc_task *task, int status)
{
        if (!RPC_IS_QUEUED(task))
                return;
        spin_lock_bh(&queue->lock);
        rpc_wake_up_task_queue_set_status_locked(queue, task, status);
        spin_unlock_bh(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single owner.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q) && --queue->nr) {
                task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
                goto out;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
        return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
        if (RPC_IS_PRIORITY(queue))
                return __rpc_find_next_queued_priority(queue);
        if (!list_empty(&queue->tasks[0]))
                return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
        return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue,
                bool (*func)(struct rpc_task *, void *), void *data)
{
        struct rpc_task *task = NULL;

        dprintk("RPC:       wake_up_first(%p \"%s\")\n",
                        queue, rpc_qname(queue));
        spin_lock_bh(&queue->lock);
        task = __rpc_find_next_queued(queue);
        if (task != NULL)
                task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
                                task, func, data);
        spin_unlock_bh(&queue->lock);

        return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
                bool (*func)(struct rpc_task *, void *), void *data)
{
        return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
        return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                while (!list_empty(head)) {
                        struct rpc_task *task;
                        task = list_first_entry(head,
                                        struct rpc_task,
                                        u.tk_wait.list);
                        rpc_wake_up_task_queue_locked(queue, task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                while (!list_empty(head)) {
                        struct rpc_task *task;
                        task = list_first_entry(head,
                                        struct rpc_task,
                                        u.tk_wait.list);
                        task->tk_status = status;
                        rpc_wake_up_task_queue_locked(queue, task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

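/*
 * Per-queue timer callback: wake every task on the queue's timer list
 * whose deadline has passed with tk_status set to -ETIMEDOUT, then
 * re-arm the timer for the earliest remaining expiry.
 */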
static void __rpc_queue_timer_fn(struct timer_list *t)
{
        struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
        struct rpc_task *task, *n;
        unsigned long expires, now, timeo;

        spin_lock(&queue->lock);
        expires = now = jiffies;
        list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
                timeo = task->u.tk_wait.expires;
                if (time_after_eq(now, timeo)) {
                        dprintk("RPC: %5u timeout\n", task->tk_pid);
                        task->tk_status = -ETIMEDOUT;
                        rpc_wake_up_task_queue_locked(queue, task);
                        continue;
                }
                if (expires == now || time_after(expires, timeo))
                        expires = timeo;
        }
        if (!list_empty(&queue->timer_list.list))
                rpc_set_queue_timer(queue, expires);
        spin_unlock(&queue->lock);
}

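/*
 * Callback for tasks sleeping in rpc_delay(): expiring is the normal
 * way off the delay queue, so clear the -ETIMEDOUT status set by the
 * queue timer.
 */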
static void __rpc_atrun(struct rpc_task *task)
{
        if (task->tk_status == -ETIMEDOUT)
                task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;
        task->tk_rebind_retry = 2;

        /* starting timestamp */
        task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
        task->tk_timeouts = 0;
        task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
        rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                if (task->tk_action != NULL) {
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                        rpc_reset_task_statistics(task);
                }
        }
}

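/**
 * rpc_signal_task - mark a task as signalled and wake it up
 * @task: task to be signalled
 *
 * Sets RPC_TASK_SIGNALLED and, if the task is sleeping on a wait queue,
 * wakes it with tk_status set to -ERESTARTSYS so that __rpc_execute()
 * can exit.
 */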
void rpc_signal_task(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;

        if (!RPC_IS_ACTIVATED(task))
                return;
        set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
        smp_mb__after_atomic();
        queue = READ_ONCE(task->tk_waitqueue);
        if (queue)
                rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

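/**
 * rpc_exit - make an RPC task exit with the given status
 * @task: task to terminate
 * @status: value for tk_status
 *
 * Points tk_action at rpc_exit_task() and wakes the task so that the
 * state machine runs ->rpc_call_done() and releases its resources.
 */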
void rpc_exit(struct rpc_task *task, int status)
{
        task->tk_status = status;
        task->tk_action = rpc_exit_task;
        rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

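/*
 * Invoke the caller-supplied ->rpc_release() callback, if any.
 */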
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
        if (ops->rpc_release != NULL)
                ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        int task_is_async = RPC_IS_ASYNC(task);
        int status = 0;

        dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
                        task->tk_pid, task->tk_flags);

        WARN_ON_ONCE(RPC_IS_QUEUED(task));
        if (RPC_IS_QUEUED(task))
                return;

        for (;;) {
                void (*do_action)(struct rpc_task *);

                /*
                 * Perform the next FSM step or a pending callback.
                 *
                 * tk_action may be NULL if the task has been killed.
                 * In particular, note that rpc_killall_tasks may
                 * do this at any time, so beware when dereferencing.
                 */
                do_action = task->tk_action;
                if (task->tk_callback) {
                        do_action = task->tk_callback;
                        task->tk_callback = NULL;
                }
                if (!do_action)
                        break;
                trace_rpc_task_run_action(task, do_action);
                do_action(task);

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;

                /*
                 * Signalled tasks should exit rather than sleep.
                 */
                if (RPC_SIGNALLED(task))
                        rpc_exit(task, -ERESTARTSYS);

                /*
                 * The queue->lock protects against races with
                 * rpc_make_runnable().
                 *
                 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
                 * rpc_task, rpc_make_runnable() can assign it to a
                 * different workqueue. We therefore cannot assume that the
                 * rpc_task pointer may still be dereferenced.
                 */
                queue = task->tk_waitqueue;
                spin_lock_bh(&queue->lock);
                if (!RPC_IS_QUEUED(task)) {
                        spin_unlock_bh(&queue->lock);
                        continue;
                }
                rpc_clear_running(task);
                spin_unlock_bh(&queue->lock);
                if (task_is_async)
                        return;

                /* sync task: sleep here */
                dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_killable,
                                TASK_KILLABLE);
                if (status < 0) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %5u got signal\n", task->tk_pid);
                        set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
                        rpc_exit(task, -ERESTARTSYS);
                }
                dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
                        task->tk_status);
        /* Release all resources associated with the task */
        rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *       released. In particular note that tk_release() will have
 *       been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
        bool is_async = RPC_IS_ASYNC(task);

        rpc_set_active(task);
        rpc_make_runnable(rpciod_workqueue, task);
        if (!is_async)
                __rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
        unsigned int pflags = memalloc_nofs_save();

        __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
        memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
        struct rpc_rqst *rqst = task->tk_rqstp;
        size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
        struct rpc_buffer *buf;
        gfp_t gfp = GFP_NOFS;

        if (RPC_IS_SWAPPER(task))
                gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

        size += sizeof(struct rpc_buffer);
        if (size <= RPC_BUFFER_MAXSIZE)
                buf = mempool_alloc(rpc_buffer_mempool, gfp);
        else
                buf = kmalloc(size, gfp);

        if (!buf)
                return -ENOMEM;

        buf->len = size;
        dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
                        task->tk_pid, size, buf);
        rqst->rq_buffer = buf->data;
        rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
        return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
        void *buffer = task->tk_rqstp->rq_buffer;
        size_t size;
        struct rpc_buffer *buf;

        buf = container_of(buffer, struct rpc_buffer, data);
        size = buf->len;

        dprintk("RPC:       freeing buffer of size %zu at %p\n",
                        size, buf);

        if (size <= RPC_BUFFER_MAXSIZE)
                mempool_free(buf, rpc_buffer_mempool);
        else
                kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
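
/*
 * Initialise a freshly allocated (or caller-supplied) rpc_task from
 * @task_setup_data. The task starts with one reference (tk_count == 1),
 * owned by the caller.
 */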
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
        memset(task, 0, sizeof(*task));
        atomic_set(&task->tk_count, 1);
        task->tk_flags = task_setup_data->flags;
        task->tk_ops = task_setup_data->callback_ops;
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);

        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = task_setup_data->workqueue;

        task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

        task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;

        rpc_init_task_statistics(task);

        dprintk("RPC:       new task initialized, procpid %u\n",
                        task_pid_nr(current));
}

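/*
 * Allocate a dynamic rpc_task from the task mempool. GFP_NOFS keeps
 * the allocation from recursing into filesystem reclaim.
 */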
static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
        struct rpc_task *task = setup_data->task;
        unsigned short flags = 0;

        if (task == NULL) {
                task = rpc_alloc_task();
                flags = RPC_TASK_DYNAMIC;
        }

        rpc_init_task(task, setup_data);
        task->tk_flags |= flags;
        dprintk("RPC:       allocated task %p\n", task);
        return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
        unsigned short tk_flags = task->tk_flags;

        put_rpccred(task->tk_op_cred);
        rpc_release_calldata(task->tk_ops, task->tk_calldata);

        if (tk_flags & RPC_TASK_DYNAMIC) {
                dprintk("RPC: %5u freeing task\n", task->tk_pid);
                mempool_free(task, rpc_task_mempool);
        }
}

static void rpc_async_release(struct work_struct *work)
{
        unsigned int pflags = memalloc_nofs_save();

        rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
        memalloc_nofs_restore(pflags);
}

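/*
 * Drop the resources a task is holding: the transport slot, the RPC
 * credential, and the reference to the rpc_clnt.
 */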
static void rpc_release_resources_task(struct rpc_task *task)
{
        xprt_release(task);
        if (task->tk_msg.rpc_cred) {
                put_cred(task->tk_msg.rpc_cred);
                task->tk_msg.rpc_cred = NULL;
        }
        rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
                struct workqueue_struct *q)
{
        if (q != NULL) {
                INIT_WORK(&task->u.tk_work, rpc_async_release);
                queue_work(q, &task->u.tk_work);
        } else
                rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
        if (atomic_dec_and_test(&task->tk_count)) {
                rpc_release_resources_task(task);
                rpc_final_put_task(task, q);
        }
}

void rpc_put_task(struct rpc_task *task)
{
        rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
        rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
        dprintk("RPC: %5u release task\n", task->tk_pid);

        WARN_ON_ONCE(RPC_IS_QUEUED(task));

        rpc_release_resources_task(task);

        /*
         * Note: at this point we have been removed from rpc_clnt->cl_tasks,
         * so it should be safe to use task->tk_count as a test for whether
         * or not any other processes still hold references to our rpc_task.
         */
        if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
                /* Wake up anyone who may be waiting for task completion */
                if (!rpc_complete_task(task))
                        return;
        } else {
                if (!atomic_dec_and_test(&task->tk_count))
                        return;
        }
        rpc_final_put_task(task, task->tk_workqueue);
}

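/*
 * rpciod_up/rpciod_down pin the module while RPC clients exist; the
 * rpciod and xprtiod workqueues themselves are created by rpciod_start()
 * at init time and destroyed by rpciod_stop().
 */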
int rpciod_up(void)
{
        return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
        module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
        struct workqueue_struct *wq;

        /*
         * Create the rpciod thread and wait for it to start.
         */
        dprintk("RPC:       creating workqueue rpciod\n");
        wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!wq)
                goto out_failed;
        rpciod_workqueue = wq;
        /* Note: highpri because network receive is latency sensitive */
        wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
        if (!wq)
                goto free_rpciod;
        xprtiod_workqueue = wq;
        return 1;
free_rpciod:
        wq = rpciod_workqueue;
        rpciod_workqueue = NULL;
        destroy_workqueue(wq);
out_failed:
        return 0;
}

static void rpciod_stop(void)
{
        struct workqueue_struct *wq = NULL;

        if (rpciod_workqueue == NULL)
                return;
        dprintk("RPC:       destroying workqueue rpciod\n");

        wq = rpciod_workqueue;
        rpciod_workqueue = NULL;
        destroy_workqueue(wq);
        wq = xprtiod_workqueue;
        xprtiod_workqueue = NULL;
        destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
        rpciod_stop();
        mempool_destroy(rpc_buffer_mempool);
        mempool_destroy(rpc_task_mempool);
        kmem_cache_destroy(rpc_task_slabp);
        kmem_cache_destroy(rpc_buffer_slabp);
        rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
        /*
         * The following is not strictly a mempool initialisation,
         * but there is no harm in doing it here
         */
        rpc_init_wait_queue(&delay_queue, "delayq");
        if (!rpciod_start())
                goto err_nomem;

        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                           sizeof(struct rpc_task),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
                                                    rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
                                                      rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}