// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;

		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);
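
/*
 * Worked example (illustrative): tk_timeout holds an absolute jiffies
 * deadline. If a task was queued with timeout = jiffies + 5 * HZ, calling
 * rpc_task_timeout() two seconds later returns roughly 3 * HZ; once the
 * deadline has passed, it returns 0.
 */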

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held in order to avoid races within
 * __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;

	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) ||
	    time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
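
/*
 * Design note (from the helpers above): each wait queue arms a single
 * delayed work item for the earliest pending expiry on timer_list.list,
 * rather than one timer per task; __rpc_queue_timer_fn() then sweeps the
 * list and re-arms the work for whatever deadline comes next.
 */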

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}
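
/*
 * Note on the encoding above: a task sharing tk_owner with an already
 * queued task is parked on that task's .links list instead of on the
 * queue itself. Its .list.prev is set to NULL as a marker, and .list.next
 * is reused to remember the queue head, so __rpc_list_dequeue_task() can
 * promote the next same-owner task onto the proper queue without being
 * told which queue it came from.
 */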

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
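
/*
 * Usage sketch (hypothetical caller, names invented for illustration):
 * a subsystem embeds a queue, names it once at setup, and tears it down
 * when done:
 *
 *	static struct rpc_wait_queue my_queue;
 *
 *	rpc_init_wait_queue(&my_queue, "myq");
 *	...
 *	rpc_destroy_wait_queue(&my_queue);
 */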

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
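
/*
 * Illustrative sketch (hypothetical caller): an async task started via
 * rpc_run_task() can be waited on and reaped with the NULL-action
 * wrapper rpc_wait_for_completion_task() from sched.h:
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (!IS_ERR(task)) {
 *		rpc_wait_for_completion_task(task);
 *		status = task->tk_status;
 *		rpc_put_task(task);
 *	}
 */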

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
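
/*
 * Usage sketch (hypothetical queue and callback names): a state-machine
 * step typically parks the task and relies on another context to wake it:
 *
 *	rpc_sleep_on(&some_queue, task, some_callback);	(this context)
 *	...
 *	rpc_wake_up_queued_task(&some_queue, task);	(waker context)
 *
 * The callback, if any, is run by __rpc_execute() after the wakeup,
 * before the next tk_action step.
 */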

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}
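
/*
 * Scheduling-order note (from the function above): privileged tasks are
 * always served first. Then up to queue->nr tasks (1 << priority, per
 * rpc_set_waitqueue_priority()) are batched from the current priority
 * level; once the batch is spent, the scan moves down one level at a
 * time, wrapping from level 0 back to the maximum, and the first
 * non-empty level becomes the new current priority.
 */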

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
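
/*
 * Example (illustrative, with a hypothetical step name): a task that
 * wants to retry after three seconds sets its next action and parks
 * itself on the global delay_queue:
 *
 *	task->tk_action = my_retry_step;
 *	rpc_delay(task, 3 * HZ);
 *
 * __rpc_atrun() clears the -ETIMEDOUT status, so expiry of the delay is
 * not treated as an error.
 */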

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task)) {
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			trace_rpc_task_signalled(task, task->tk_action);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
}
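
/*
 * FSM sketch (step names as used by clnt.c, shown for orientation): each
 * tk_action step either sets task->tk_action to the next step, or puts
 * the task to sleep on a wait queue and returns. A typical chain is
 * roughly:
 *
 *	call_start -> call_reserve -> ... -> call_transmit -> call_status
 *
 * A step that slept is resumed through tk_callback, which the loop above
 * runs before the next tk_action.
 */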

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that rpc_release_task() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async) {
		unsigned int pflags = memalloc_nofs_save();

		__rpc_execute(task);
		memalloc_nofs_restore(pflags);
	}
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing the warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_ASYNC(task))
		gfp = GFP_NOWAIT | __GFP_NOWARN;
	if (RPC_IS_SWAPPER(task))
		gfp |= __GFP_MEMALLOC;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
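
/*
 * Layout sketch of the region returned above (derived from the code):
 *
 *	+-------------------+-------------------+-------------------+
 *	| struct rpc_buffer | call buffer       | reply buffer      |
 *	| (stores len)      | rq_callsize bytes | rq_rcvsize bytes  |
 *	+-------------------+-------------------+-------------------+
 *	^ buf               ^ rq_buffer         ^ rq_rbuffer
 */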

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}
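
/*
 * Usage sketch (hypothetical values): callers normally fill in a struct
 * rpc_task_setup and let rpc_run_task() in clnt.c invoke rpc_new_task()
 * followed by rpc_execute():
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client = clnt,
 *		.callback_ops = &my_call_ops,	(hypothetical ops table)
 *		.flags = RPC_TASK_ASYNC,
 *	};
 *
 *	task = rpc_run_task(&task_setup_data);
 */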

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC)
		mempool_free(task, rpc_task_mempool);
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}