/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct timer_list *t);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held and bh disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

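/*
 * (Re)arm the queue's timer to fire at @expires.
 */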
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(task->tk_timeout));

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

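/*
 * If the task at the head of the current priority list belongs to the
 * queue's current owner, move it to the tail so that other owners'
 * tasks get served before the priority level changes.
 */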
static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
	struct list_head *q = &queue->tasks[queue->priority];
	struct rpc_task *task;

	if (!list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		if (task->tk_owner == queue->owner)
			list_move_tail(&task->u.tk_wait.list, q);
	}
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		/* Fairness: rotate the list when changing priority */
		rpc_rotate_queue_owner(queue);
		queue->priority = priority;
	}
}

static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	if (queue_priority > queue->priority)
		rpc_set_waitqueue_priority(queue, queue_priority);
	q = &queue->tasks[queue_priority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

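/*
 * Wait-bit action: sleep in a freezer-friendly way, aborting with
 * -ERESTARTSYS if a signal is pending for the given sleep @mode.
 */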
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

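/*
 * Tag the task with a unique id for use by dprintk() and tracepoints.
 */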
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

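/*
 * Mark the task as active and emit the rpc_task_begin tracepoint.
 */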
static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task->tk_client, task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task->tk_client, task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task->tk_client, task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task->tk_client, task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task_on_wq(wq, queue, task);
	}
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
	spin_unlock_bh(&queue->lock);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	return task;
}

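/*
 * Peek at the next runnable task without removing it from the queue.
 */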
static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL) {
		if (func(task, data))
			rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
		else
			task = NULL;
	}
	spin_unlock_bh(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

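/*
 * Queue timer callback: time out every task whose deadline has passed,
 * then re-arm the timer for the earliest remaining deadline.
 */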
static void __rpc_queue_timer_fn(struct timer_list *t)
{
	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

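/*
 * Tasks woken from the delay queue time out by design, so clear the
 * resulting -ETIMEDOUT before the task resumes.
 */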
static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task->tk_client, task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer remains valid to dereference.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

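/*
 * Workqueue entry point for asynchronous tasks.
 */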
static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

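/*
 * GFP_NOIO keeps the allocation from recursing back into the I/O path
 * under memory pressure.
 */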
static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

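/*
 * Pin (and unpin) the module for as long as users need rpciod.
 */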
int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}