1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/net/sunrpc/xprt.c
4 *
5 * This is a generic RPC call interface supporting congestion avoidance,
6 * and asynchronous calls.
7 *
8 * The interface works like this:
9 *
10 * - When a process places a call, it allocates a request slot if
11 * one is available. Otherwise, it sleeps on the backlog queue
12 * (xprt_reserve).
13 * - Next, the caller puts together the RPC message, stuffs it into
14 * the request struct, and calls xprt_transmit().
15 * - xprt_transmit sends the message and installs the caller on the
16 * transport's wait list. At the same time, if a reply is expected,
17 * it installs a timer that is run after the packet's timeout has
18 * expired.
19 * - When a packet arrives, the data_ready handler walks the list of
20 * pending requests for that transport. If a matching XID is found, the
21 * caller is woken up, and the timer removed.
22 * - When no reply arrives within the timeout interval, the timer is
23 * fired by the kernel and runs xprt_timer(). It either adjusts the
24 * timeout values (minor timeout) or wakes up the caller with a status
25 * of -ETIMEDOUT.
26 * - When the caller receives a notification from RPC that a reply arrived,
27 * it should release the RPC slot, and process the reply.
28 * If the call timed out, it may choose to retry the operation by
29 * adjusting the initial timeout value, and simply calling rpc_call
30 * again.
31 *
32 * Support for async RPC is done through a set of RPC-specific scheduling
33 * primitives that `transparently' work for processes as well as async
34 * tasks that rely on callbacks.
35 *
36 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
37 *
38 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
39 */
40
41 #include <linux/module.h>
42
43 #include <linux/types.h>
44 #include <linux/interrupt.h>
45 #include <linux/workqueue.h>
46 #include <linux/net.h>
47 #include <linux/ktime.h>
48
49 #include <linux/sunrpc/clnt.h>
50 #include <linux/sunrpc/metrics.h>
51 #include <linux/sunrpc/bc_xprt.h>
52 #include <linux/rcupdate.h>
53 #include <linux/sched/mm.h>
54
55 #include <trace/events/sunrpc.h>
56
57 #include "sunrpc.h"
58
59 /*
60 * Local variables
61 */
62
63 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
64 # define RPCDBG_FACILITY RPCDBG_XPRT
65 #endif
66
67 /*
68 * Local functions
69 */
70 static void xprt_init(struct rpc_xprt *xprt, struct net *net);
71 static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
72 static void xprt_destroy(struct rpc_xprt *xprt);
73
74 static DEFINE_SPINLOCK(xprt_list_lock);
75 static LIST_HEAD(xprt_list);
76
77 static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
78 {
79 unsigned long timeout = jiffies + req->rq_timeout;
80
81 if (time_before(timeout, req->rq_majortimeo))
82 return timeout;
83 return req->rq_majortimeo;
84 }
85
86 /**
87 * xprt_register_transport - register a transport implementation
88 * @transport: transport to register
89 *
90 * If a transport implementation is loaded as a kernel module, it can
91 * call this interface to make itself known to the RPC client.
92 *
93 * Returns:
94 * 0: transport successfully registered
95 * -EEXIST: transport already registered
96 * -EINVAL: transport module being unloaded
97 */
98 int xprt_register_transport(struct xprt_class *transport)
99 {
100 struct xprt_class *t;
101 int result;
102
103 result = -EEXIST;
104 spin_lock(&xprt_list_lock);
105 list_for_each_entry(t, &xprt_list, list) {
106 /* don't register the same transport class twice */
107 if (t->ident == transport->ident)
108 goto out;
109 }
110
111 list_add_tail(&transport->list, &xprt_list);
112 printk(KERN_INFO "RPC: Registered %s transport module.\n",
113 transport->name);
114 result = 0;
115
116 out:
117 spin_unlock(&xprt_list_lock);
118 return result;
119 }
120 EXPORT_SYMBOL_GPL(xprt_register_transport);
121
122 /**
123 * xprt_unregister_transport - unregister a transport implementation
124 * @transport: transport to unregister
125 *
126 * Returns:
127 * 0: transport successfully unregistered
128 * -ENOENT: transport never registered
129 */
130 int xprt_unregister_transport(struct xprt_class *transport)
131 {
132 struct xprt_class *t;
133 int result;
134
135 result = 0;
136 spin_lock(&xprt_list_lock);
137 list_for_each_entry(t, &xprt_list, list) {
138 if (t == transport) {
139 printk(KERN_INFO
140 "RPC: Unregistered %s transport module.\n",
141 transport->name);
142 list_del_init(&transport->list);
143 goto out;
144 }
145 }
146 result = -ENOENT;
147
148 out:
149 spin_unlock(&xprt_list_lock);
150 return result;
151 }
152 EXPORT_SYMBOL_GPL(xprt_unregister_transport);
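
/*
 * A minimal sketch of how a transport module typically pairs the two
 * calls above from its init/exit handlers. The "example" class, its
 * ident value, and xprt_setup_example() are hypothetical:
 */
#if 0
static struct rpc_xprt *xprt_setup_example(struct xprt_create *args)
{
	/* a real setup routine allocates and initializes an rpc_xprt */
	return ERR_PTR(-EINVAL);
}

static struct xprt_class example_transport = {
	.list		= LIST_HEAD_INIT(example_transport.list),
	.name		= "example",
	.owner		= THIS_MODULE,
	.ident		= 255,			/* hypothetical unique ident */
	.setup		= xprt_setup_example,
	.netid		= { "example", "" },	/* empty string terminates */
};

static int __init example_xprt_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_xprt_exit(void)
{
	xprt_unregister_transport(&example_transport);
}

module_init(example_xprt_init);
module_exit(example_xprt_exit);
#endif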
153
154 static void
155 xprt_class_release(const struct xprt_class *t)
156 {
157 module_put(t->owner);
158 }
159
160 static const struct xprt_class *
161 xprt_class_find_by_ident_locked(int ident)
162 {
163 const struct xprt_class *t;
164
165 list_for_each_entry(t, &xprt_list, list) {
166 if (t->ident != ident)
167 continue;
168 if (!try_module_get(t->owner))
169 continue;
170 return t;
171 }
172 return NULL;
173 }
174
175 static const struct xprt_class *
176 xprt_class_find_by_ident(int ident)
177 {
178 const struct xprt_class *t;
179
180 spin_lock(&xprt_list_lock);
181 t = xprt_class_find_by_ident_locked(ident);
182 spin_unlock(&xprt_list_lock);
183 return t;
184 }
185
186 static const struct xprt_class *
187 xprt_class_find_by_netid_locked(const char *netid)
188 {
189 const struct xprt_class *t;
190 unsigned int i;
191
192 list_for_each_entry(t, &xprt_list, list) {
193 for (i = 0; t->netid[i][0] != '\0'; i++) {
194 if (strcmp(t->netid[i], netid) != 0)
195 continue;
196 if (!try_module_get(t->owner))
197 continue;
198 return t;
199 }
200 }
201 return NULL;
202 }
203
204 static const struct xprt_class *
205 xprt_class_find_by_netid(const char *netid)
206 {
207 const struct xprt_class *t;
208
209 spin_lock(&xprt_list_lock);
210 t = xprt_class_find_by_netid_locked(netid);
211 if (!t) {
212 spin_unlock(&xprt_list_lock);
213 request_module("rpc%s", netid);
214 spin_lock(&xprt_list_lock);
215 t = xprt_class_find_by_netid_locked(netid);
216 }
217 spin_unlock(&xprt_list_lock);
218 return t;
219 }
220
221 /**
222 * xprt_find_transport_ident - convert a netid into a transport identifier
223 * @netid: transport to load
224 *
225 * Returns:
226 * > 0: transport identifier
227 * -ENOENT: transport module not available
228 */
229 int xprt_find_transport_ident(const char *netid)
230 {
231 const struct xprt_class *t;
232 int ret;
233
234 t = xprt_class_find_by_netid(netid);
235 if (!t)
236 return -ENOENT;
237 ret = t->ident;
238 xprt_class_release(t);
239 return ret;
240 }
241 EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
242
243 static void xprt_clear_locked(struct rpc_xprt *xprt)
244 {
245 xprt->snd_task = NULL;
246 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
247 smp_mb__before_atomic();
248 clear_bit(XPRT_LOCKED, &xprt->state);
249 smp_mb__after_atomic();
250 } else
251 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
252 }
253
254 /**
255 * xprt_reserve_xprt - serialize write access to transports
256 * @task: task that is requesting access to the transport
257 * @xprt: pointer to the target transport
258 *
259 * This prevents mixing the payload of separate requests, and prevents
260 * transport connects from colliding with writes. No congestion control
261 * is provided.
262 */
263 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
264 {
265 struct rpc_rqst *req = task->tk_rqstp;
266
267 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
268 if (task == xprt->snd_task)
269 goto out_locked;
270 goto out_sleep;
271 }
272 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
273 goto out_unlock;
274 xprt->snd_task = task;
275
276 out_locked:
277 trace_xprt_reserve_xprt(xprt, task);
278 return 1;
279
280 out_unlock:
281 xprt_clear_locked(xprt);
282 out_sleep:
283 task->tk_status = -EAGAIN;
284 if (RPC_IS_SOFT(task))
285 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
286 xprt_request_timeout(req));
287 else
288 rpc_sleep_on(&xprt->sending, task, NULL);
289 return 0;
290 }
291 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
292
293 static bool
294 xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
295 {
296 return test_bit(XPRT_CWND_WAIT, &xprt->state);
297 }
298
299 static void
300 xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
301 {
302 if (!list_empty(&xprt->xmit_queue)) {
303 /* Peek at head of queue to see if it can make progress */
304 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
305 rq_xmit)->rq_cong)
306 return;
307 }
308 set_bit(XPRT_CWND_WAIT, &xprt->state);
309 }
310
311 static void
312 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
313 {
314 if (!RPCXPRT_CONGESTED(xprt))
315 clear_bit(XPRT_CWND_WAIT, &xprt->state);
316 }
317
318 /**
319 * xprt_reserve_xprt_cong - serialize write access to transports
320 * @task: task that is requesting access to the transport
321 *
322 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
323 * integrated into the decision of whether a request is allowed to be
324 * woken up and given access to the transport.
325 * Note that the lock is only granted if we know there are free slots.
326 */
327 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
328 {
329 struct rpc_rqst *req = task->tk_rqstp;
330
331 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
332 if (task == xprt->snd_task)
333 goto out_locked;
334 goto out_sleep;
335 }
336 if (req == NULL) {
337 xprt->snd_task = task;
338 goto out_locked;
339 }
340 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
341 goto out_unlock;
342 if (!xprt_need_congestion_window_wait(xprt)) {
343 xprt->snd_task = task;
344 goto out_locked;
345 }
346 out_unlock:
347 xprt_clear_locked(xprt);
348 out_sleep:
349 task->tk_status = -EAGAIN;
350 if (RPC_IS_SOFT(task))
351 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
352 xprt_request_timeout(req));
353 else
354 rpc_sleep_on(&xprt->sending, task, NULL);
355 return 0;
356 out_locked:
357 trace_xprt_reserve_cong(xprt, task);
358 return 1;
359 }
360 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
361
362 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
363 {
364 int retval;
365
366 if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
367 return 1;
368 spin_lock(&xprt->transport_lock);
369 retval = xprt->ops->reserve_xprt(xprt, task);
370 spin_unlock(&xprt->transport_lock);
371 return retval;
372 }
373
374 static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
375 {
376 struct rpc_xprt *xprt = data;
377
378 xprt->snd_task = task;
379 return true;
380 }
381
382 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
383 {
384 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
385 return;
386 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
387 goto out_unlock;
388 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
389 __xprt_lock_write_func, xprt))
390 return;
391 out_unlock:
392 xprt_clear_locked(xprt);
393 }
394
395 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
396 {
397 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
398 return;
399 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
400 goto out_unlock;
401 if (xprt_need_congestion_window_wait(xprt))
402 goto out_unlock;
403 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
404 __xprt_lock_write_func, xprt))
405 return;
406 out_unlock:
407 xprt_clear_locked(xprt);
408 }
409
410 /**
411 * xprt_release_xprt - allow other requests to use a transport
412 * @xprt: transport with other tasks potentially waiting
413 * @task: task that is releasing access to the transport
414 *
415 * Note that "task" can be NULL. No congestion control is provided.
416 */
417 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
418 {
419 if (xprt->snd_task == task) {
420 xprt_clear_locked(xprt);
421 __xprt_lock_write_next(xprt);
422 }
423 trace_xprt_release_xprt(xprt, task);
424 }
425 EXPORT_SYMBOL_GPL(xprt_release_xprt);
426
427 /**
428 * xprt_release_xprt_cong - allow other requests to use a transport
429 * @xprt: transport with other tasks potentially waiting
430 * @task: task that is releasing access to the transport
431 *
432 * Note that "task" can be NULL. Another task is awoken to use the
433 * transport if the transport's congestion window allows it.
434 */
435 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
436 {
437 if (xprt->snd_task == task) {
438 xprt_clear_locked(xprt);
439 __xprt_lock_write_next_cong(xprt);
440 }
441 trace_xprt_release_cong(xprt, task);
442 }
443 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
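
/*
 * Transport drivers plug these helpers straight into their
 * rpc_xprt_ops. A sketch (other members omitted): stream-like
 * transports take the plain variants, datagram-like transports the
 * congestion-controlled ones:
 */
#if 0
static const struct rpc_xprt_ops example_stream_ops = {
	.reserve_xprt	= xprt_reserve_xprt,
	.release_xprt	= xprt_release_xprt,
	/* ... */
};

static const struct rpc_xprt_ops example_dgram_ops = {
	.reserve_xprt	= xprt_reserve_xprt_cong,
	.release_xprt	= xprt_release_xprt_cong,
	/* ... */
};
#endif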
444
445 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
446 {
447 if (xprt->snd_task != task)
448 return;
449 spin_lock(&xprt->transport_lock);
450 xprt->ops->release_xprt(xprt, task);
451 spin_unlock(&xprt->transport_lock);
452 }
453
454 /*
455 * Van Jacobson congestion avoidance. Check if the congestion window
456 * overflowed. Put the task to sleep if this is the case.
457 */
458 static int
459 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
460 {
461 if (req->rq_cong)
462 return 1;
463 trace_xprt_get_cong(xprt, req->rq_task);
464 if (RPCXPRT_CONGESTED(xprt)) {
465 xprt_set_congestion_window_wait(xprt);
466 return 0;
467 }
468 req->rq_cong = 1;
469 xprt->cong += RPC_CWNDSCALE;
470 return 1;
471 }
472
473 /*
474 * Adjust the congestion window, and wake up the next task
475 * that has been sleeping due to congestion
476 */
477 static void
478 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
479 {
480 if (!req->rq_cong)
481 return;
482 req->rq_cong = 0;
483 xprt->cong -= RPC_CWNDSCALE;
484 xprt_test_and_clear_congestion_window_wait(xprt);
485 trace_xprt_put_cong(xprt, req->rq_task);
486 __xprt_lock_write_next_cong(xprt);
487 }
488
489 /**
490 * xprt_request_get_cong - Request congestion control credits
491 * @xprt: pointer to transport
492 * @req: pointer to RPC request
493 *
494 * Useful for transports that require congestion control.
495 */
496 bool
497 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
498 {
499 bool ret = false;
500
501 if (req->rq_cong)
502 return true;
503 spin_lock(&xprt->transport_lock);
504 ret = __xprt_get_cong(xprt, req) != 0;
505 spin_unlock(&xprt->transport_lock);
506 return ret;
507 }
508 EXPORT_SYMBOL_GPL(xprt_request_get_cong);
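
/*
 * A sketch of the send path in a congestion-controlled transport: a
 * credit must be held before the request goes on the wire. The
 * function name and the -EBADSLT choice are illustrative:
 */
#if 0
static int example_send_one(struct rpc_rqst *req)
{
	if (!xprt_request_get_cong(req->rq_xprt, req))
		return -EBADSLT;	/* retried once __xprt_put_cong() frees a credit */
	/* ... hand req to the transport ... */
	return 0;
}
#endif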
509
510 /**
511 * xprt_release_rqst_cong - housekeeping when request is complete
512 * @task: RPC request that recently completed
513 *
514 * Useful for transports that require congestion control.
515 */
516 void xprt_release_rqst_cong(struct rpc_task *task)
517 {
518 struct rpc_rqst *req = task->tk_rqstp;
519
520 __xprt_put_cong(req->rq_xprt, req);
521 }
522 EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
523
524 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
525 {
526 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
527 __xprt_lock_write_next_cong(xprt);
528 }
529
530 /*
531 * Clear the congestion window wait flag and wake up the next
532 * entry on xprt->sending
533 */
534 static void
535 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
536 {
537 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
538 spin_lock(&xprt->transport_lock);
539 __xprt_lock_write_next_cong(xprt);
540 spin_unlock(&xprt->transport_lock);
541 }
542 }
543
544 /**
545 * xprt_adjust_cwnd - adjust transport congestion window
546 * @xprt: pointer to xprt
547 * @task: recently completed RPC request used to adjust window
548 * @result: result code of completed RPC request
549 *
550  * The transport code maintains an estimate on the maximum number of
551  * outstanding RPC requests, using a smoothed version of the congestion
552 * avoidance implemented in 44BSD. This is basically the Van Jacobson
553 * congestion algorithm: If a retransmit occurs, the congestion window is
554 * halved; otherwise, it is incremented by 1/cwnd when
555 *
556 * - a reply is received and
557 * - a full number of requests are outstanding and
558 * - the congestion window hasn't been updated recently.
559 */
560 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
561 {
562 struct rpc_rqst *req = task->tk_rqstp;
563 unsigned long cwnd = xprt->cwnd;
564
565 if (result >= 0 && cwnd <= xprt->cong) {
566 /* The (cwnd >> 1) term makes sure
567 * the result gets rounded properly. */
568 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
569 if (cwnd > RPC_MAXCWND(xprt))
570 cwnd = RPC_MAXCWND(xprt);
571 __xprt_lock_write_next_cong(xprt);
572 } else if (result == -ETIMEDOUT) {
573 cwnd >>= 1;
574 if (cwnd < RPC_CWNDSCALE)
575 cwnd = RPC_CWNDSCALE;
576 }
577 dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
578 xprt->cong, xprt->cwnd, cwnd);
579 xprt->cwnd = cwnd;
580 __xprt_put_cong(xprt, req);
581 }
582 EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
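
/*
 * Worked example, assuming RPC_CWNDSCALE = 1 << 8 = 256 (one request
 * slot per 256 units of cwnd): with cwnd = 1024 (four slots) and a
 * reply arriving while cwnd <= cong, the window grows by
 * (256 * 256 + 512) / 1024 = 64, i.e. a quarter of a slot. An
 * -ETIMEDOUT halves it to 512, and it is never reduced below
 * RPC_CWNDSCALE.
 */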
583
584 /**
585 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
586 * @xprt: transport with waiting tasks
587 * @status: result code to plant in each task before waking it
588 *
589 */
590 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
591 {
592 if (status < 0)
593 rpc_wake_up_status(&xprt->pending, status);
594 else
595 rpc_wake_up(&xprt->pending);
596 }
597 EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
598
599 /**
600 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
601 * @xprt: transport
602 *
603 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
604 * we don't in general want to force a socket disconnection due to
605 * an incomplete RPC call transmission.
606 */
607 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
608 {
609 set_bit(XPRT_WRITE_SPACE, &xprt->state);
610 }
611 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
612
613 static bool
614 xprt_clear_write_space_locked(struct rpc_xprt *xprt)
615 {
616 if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
617 __xprt_lock_write_next(xprt);
618 dprintk("RPC: write space: waking waiting task on "
619 "xprt %p\n", xprt);
620 return true;
621 }
622 return false;
623 }
624
625 /**
626 * xprt_write_space - wake the task waiting for transport output buffer space
627 * @xprt: transport with waiting tasks
628 *
629 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
630 */
631 bool xprt_write_space(struct rpc_xprt *xprt)
632 {
633 bool ret;
634
635 if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
636 return false;
637 spin_lock(&xprt->transport_lock);
638 ret = xprt_clear_write_space_locked(xprt);
639 spin_unlock(&xprt->transport_lock);
640 return ret;
641 }
642 EXPORT_SYMBOL_GPL(xprt_write_space);
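
/*
 * Sketch of the buffer-space handshake as a socket transport might
 * wire it up (names are illustrative; cf. the xprtsock callbacks).
 * The send side parks writers; the socket's write-space callback
 * releases the next one:
 */
#if 0
static int example_send_request(struct rpc_rqst *req)
{
	int err = -EAGAIN;	/* e.g. the result of kernel_sendmsg() */

	if (err == -EAGAIN)	/* output buffer full */
		xprt_wait_for_buffer_space(req->rq_xprt);
	return err;
}

static void example_sk_write_space(struct sock *sk)
{
	struct rpc_xprt *xprt = sk->sk_user_data;

	if (xprt)
		xprt_write_space(xprt);
}
#endif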
643
644 static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
645 {
646 s64 delta = ktime_to_ns(ktime_get() - abstime);
647 return likely(delta >= 0) ?
648 jiffies - nsecs_to_jiffies(delta) :
649 jiffies + nsecs_to_jiffies(-delta);
650 }
651
652 static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
653 {
654 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
655 unsigned long majortimeo = req->rq_timeout;
656
657 if (to->to_exponential)
658 majortimeo <<= to->to_retries;
659 else
660 majortimeo += to->to_increment * to->to_retries;
661 if (majortimeo > to->to_maxval || majortimeo == 0)
662 majortimeo = to->to_maxval;
663 return majortimeo;
664 }
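
/*
 * For example, with rq_timeout = to_initval = 6 * HZ and
 * to_retries = 2: an exponential policy yields 6 * HZ << 2 = 24 * HZ,
 * while a linear policy with to_increment = 5 * HZ yields
 * (6 + 5 * 2) * HZ = 16 * HZ, each clamped to to_maxval.
 */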
665
666 static void xprt_reset_majortimeo(struct rpc_rqst *req)
667 {
668 req->rq_majortimeo += xprt_calc_majortimeo(req);
669 }
670
671 static void xprt_reset_minortimeo(struct rpc_rqst *req)
672 {
673 req->rq_minortimeo += req->rq_timeout;
674 }
675
676 static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
677 {
678 unsigned long time_init;
679 struct rpc_xprt *xprt = req->rq_xprt;
680
681 if (likely(xprt && xprt_connected(xprt)))
682 time_init = jiffies;
683 else
684 time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
685 req->rq_timeout = task->tk_client->cl_timeout->to_initval;
686 req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
687 req->rq_minortimeo = time_init + req->rq_timeout;
688 }
689
690 /**
691 * xprt_adjust_timeout - adjust timeout values for next retransmit
692 * @req: RPC request containing parameters to use for the adjustment
693 *
694 */
695 int xprt_adjust_timeout(struct rpc_rqst *req)
696 {
697 struct rpc_xprt *xprt = req->rq_xprt;
698 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
699 int status = 0;
700
701 if (time_before(jiffies, req->rq_majortimeo)) {
702 if (time_before(jiffies, req->rq_minortimeo))
703 return status;
704 if (to->to_exponential)
705 req->rq_timeout <<= 1;
706 else
707 req->rq_timeout += to->to_increment;
708 if (to->to_maxval && req->rq_timeout >= to->to_maxval)
709 req->rq_timeout = to->to_maxval;
710 req->rq_retries++;
711 } else {
712 req->rq_timeout = to->to_initval;
713 req->rq_retries = 0;
714 xprt_reset_majortimeo(req);
715 /* Reset the RTT counters == "slow start" */
716 spin_lock(&xprt->transport_lock);
717 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
718 spin_unlock(&xprt->transport_lock);
719 status = -ETIMEDOUT;
720 }
721 xprt_reset_minortimeo(req);
722
723 if (req->rq_timeout == 0) {
724 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
725 req->rq_timeout = 5 * HZ;
726 }
727 return status;
728 }
729
730 static void xprt_autoclose(struct work_struct *work)
731 {
732 struct rpc_xprt *xprt =
733 container_of(work, struct rpc_xprt, task_cleanup);
734 unsigned int pflags = memalloc_nofs_save();
735
736 trace_xprt_disconnect_auto(xprt);
737 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
738 xprt->ops->close(xprt);
739 xprt_release_write(xprt, NULL);
740 wake_up_bit(&xprt->state, XPRT_LOCKED);
741 memalloc_nofs_restore(pflags);
742 }
743
744 /**
745 * xprt_disconnect_done - mark a transport as disconnected
746 * @xprt: transport to flag for disconnect
747 *
748 */
749 void xprt_disconnect_done(struct rpc_xprt *xprt)
750 {
751 trace_xprt_disconnect_done(xprt);
752 spin_lock(&xprt->transport_lock);
753 xprt_clear_connected(xprt);
754 xprt_clear_write_space_locked(xprt);
755 xprt_clear_congestion_window_wait_locked(xprt);
756 xprt_wake_pending_tasks(xprt, -ENOTCONN);
757 spin_unlock(&xprt->transport_lock);
758 }
759 EXPORT_SYMBOL_GPL(xprt_disconnect_done);
760
761 /**
762 * xprt_force_disconnect - force a transport to disconnect
763 * @xprt: transport to disconnect
764 *
765 */
766 void xprt_force_disconnect(struct rpc_xprt *xprt)
767 {
768 trace_xprt_disconnect_force(xprt);
769
770 /* Don't race with the test_bit() in xprt_clear_locked() */
771 spin_lock(&xprt->transport_lock);
772 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
773 /* Try to schedule an autoclose RPC call */
774 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
775 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
776 else if (xprt->snd_task)
777 rpc_wake_up_queued_task_set_status(&xprt->pending,
778 xprt->snd_task, -ENOTCONN);
779 spin_unlock(&xprt->transport_lock);
780 }
781 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
782
783 static unsigned int
784 xprt_connect_cookie(struct rpc_xprt *xprt)
785 {
786 return READ_ONCE(xprt->connect_cookie);
787 }
788
789 static bool
790 xprt_request_retransmit_after_disconnect(struct rpc_task *task)
791 {
792 struct rpc_rqst *req = task->tk_rqstp;
793 struct rpc_xprt *xprt = req->rq_xprt;
794
795 return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
796 !xprt_connected(xprt);
797 }
798
799 /**
800 * xprt_conditional_disconnect - force a transport to disconnect
801 * @xprt: transport to disconnect
802 * @cookie: 'connection cookie'
803 *
804 * This attempts to break the connection if and only if 'cookie' matches
805 * the current transport 'connection cookie'. It ensures that we don't
806 * try to break the connection more than once when we need to retransmit
807 * a batch of RPC requests.
808 *
809 */
810 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
811 {
812 /* Don't race with the test_bit() in xprt_clear_locked() */
813 spin_lock(&xprt->transport_lock);
814 if (cookie != xprt->connect_cookie)
815 goto out;
816 if (test_bit(XPRT_CLOSING, &xprt->state))
817 goto out;
818 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
819 /* Try to schedule an autoclose RPC call */
820 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
821 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
822 xprt_wake_pending_tasks(xprt, -EAGAIN);
823 out:
824 spin_unlock(&xprt->transport_lock);
825 }
826
827 static bool
828 xprt_has_timer(const struct rpc_xprt *xprt)
829 {
830 return xprt->idle_timeout != 0;
831 }
832
833 static void
834 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
835 __must_hold(&xprt->transport_lock)
836 {
837 xprt->last_used = jiffies;
838 if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
839 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
840 }
841
842 static void
843 xprt_init_autodisconnect(struct timer_list *t)
844 {
845 struct rpc_xprt *xprt = from_timer(xprt, t, timer);
846
847 if (!RB_EMPTY_ROOT(&xprt->recv_queue))
848 return;
849 /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
850 xprt->last_used = jiffies;
851 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
852 return;
853 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
854 }
855
856 bool xprt_lock_connect(struct rpc_xprt *xprt,
857 struct rpc_task *task,
858 void *cookie)
859 {
860 bool ret = false;
861
862 spin_lock(&xprt->transport_lock);
863 if (!test_bit(XPRT_LOCKED, &xprt->state))
864 goto out;
865 if (xprt->snd_task != task)
866 goto out;
867 xprt->snd_task = cookie;
868 ret = true;
869 out:
870 spin_unlock(&xprt->transport_lock);
871 return ret;
872 }
873
874 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
875 {
876 spin_lock(&xprt->transport_lock);
877 if (xprt->snd_task != cookie)
878 goto out;
879 if (!test_bit(XPRT_LOCKED, &xprt->state))
880 goto out;
881         xprt->snd_task = NULL;
882 xprt->ops->release_xprt(xprt, NULL);
883 xprt_schedule_autodisconnect(xprt);
884 out:
885 spin_unlock(&xprt->transport_lock);
886 wake_up_bit(&xprt->state, XPRT_LOCKED);
887 }
888
889 /**
890 * xprt_connect - schedule a transport connect operation
891 * @task: RPC task that is requesting the connect
892 *
893 */
894 void xprt_connect(struct rpc_task *task)
895 {
896 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
897
898 trace_xprt_connect(xprt);
899
900 if (!xprt_bound(xprt)) {
901 task->tk_status = -EAGAIN;
902 return;
903 }
904 if (!xprt_lock_write(xprt, task))
905 return;
906
907 if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
908 trace_xprt_disconnect_cleanup(xprt);
909 xprt->ops->close(xprt);
910 }
911
912 if (!xprt_connected(xprt)) {
913 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
914 rpc_sleep_on_timeout(&xprt->pending, task, NULL,
915 xprt_request_timeout(task->tk_rqstp));
916
917 if (test_bit(XPRT_CLOSING, &xprt->state))
918 return;
919 if (xprt_test_and_set_connecting(xprt))
920 return;
921 /* Race breaker */
922 if (!xprt_connected(xprt)) {
923 xprt->stat.connect_start = jiffies;
924 xprt->ops->connect(xprt, task);
925 } else {
926 xprt_clear_connecting(xprt);
927 task->tk_status = 0;
928 rpc_wake_up_queued_task(&xprt->pending, task);
929 }
930 }
931 xprt_release_write(xprt, task);
932 }
933
934 /**
935 * xprt_reconnect_delay - compute the wait before scheduling a connect
936 * @xprt: transport instance
937 *
938 */
939 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
940 {
941 unsigned long start, now = jiffies;
942
943 start = xprt->stat.connect_start + xprt->reestablish_timeout;
944 if (time_after(start, now))
945 return start - now;
946 return 0;
947 }
948 EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
949
950 /**
951 * xprt_reconnect_backoff - compute the new re-establish timeout
952 * @xprt: transport instance
953 * @init_to: initial reestablish timeout
954 *
955 */
956 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
957 {
958 xprt->reestablish_timeout <<= 1;
959 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
960 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
961 if (xprt->reestablish_timeout < init_to)
962 xprt->reestablish_timeout = init_to;
963 }
964 EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
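
/*
 * For example, with @init_to = 3 * HZ and max_reconnect_timeout =
 * 300 * HZ (values are illustrative), successive calls back off
 * through 6, 12, 24, ... seconds up to the 300 second cap, and a
 * timeout that starts below @init_to is raised back to it.
 */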
965
966 enum xprt_xid_rb_cmp {
967 XID_RB_EQUAL,
968 XID_RB_LEFT,
969 XID_RB_RIGHT,
970 };
971 static enum xprt_xid_rb_cmp
972 xprt_xid_cmp(__be32 xid1, __be32 xid2)
973 {
974 if (xid1 == xid2)
975 return XID_RB_EQUAL;
976 if ((__force u32)xid1 < (__force u32)xid2)
977 return XID_RB_LEFT;
978 return XID_RB_RIGHT;
979 }
980
981 static struct rpc_rqst *
982 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
983 {
984 struct rb_node *n = xprt->recv_queue.rb_node;
985 struct rpc_rqst *req;
986
987 while (n != NULL) {
988 req = rb_entry(n, struct rpc_rqst, rq_recv);
989 switch (xprt_xid_cmp(xid, req->rq_xid)) {
990 case XID_RB_LEFT:
991 n = n->rb_left;
992 break;
993 case XID_RB_RIGHT:
994 n = n->rb_right;
995 break;
996 case XID_RB_EQUAL:
997 return req;
998 }
999 }
1000 return NULL;
1001 }
1002
1003 static void
1004 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
1005 {
1006 struct rb_node **p = &xprt->recv_queue.rb_node;
1007 struct rb_node *n = NULL;
1008 struct rpc_rqst *req;
1009
1010 while (*p != NULL) {
1011 n = *p;
1012 req = rb_entry(n, struct rpc_rqst, rq_recv);
1013                 switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
1014 case XID_RB_LEFT:
1015 p = &n->rb_left;
1016 break;
1017 case XID_RB_RIGHT:
1018 p = &n->rb_right;
1019 break;
1020 case XID_RB_EQUAL:
1021 WARN_ON_ONCE(new != req);
1022 return;
1023 }
1024 }
1025 rb_link_node(&new->rq_recv, n, p);
1026 rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1027 }
1028
1029 static void
1030 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1031 {
1032 rb_erase(&req->rq_recv, &xprt->recv_queue);
1033 }
1034
1035 /**
1036 * xprt_lookup_rqst - find an RPC request corresponding to an XID
1037 * @xprt: transport on which the original request was transmitted
1038 * @xid: RPC XID of incoming reply
1039 *
1040 * Caller holds xprt->queue_lock.
1041 */
1042 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1043 {
1044 struct rpc_rqst *entry;
1045
1046 entry = xprt_request_rb_find(xprt, xid);
1047 if (entry != NULL) {
1048 trace_xprt_lookup_rqst(xprt, xid, 0);
1049 entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
1050 return entry;
1051 }
1052
1053 dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
1054 ntohl(xid));
1055 trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1056 xprt->stat.bad_xids++;
1057 return NULL;
1058 }
1059 EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
1060
1061 static bool
1062 xprt_is_pinned_rqst(struct rpc_rqst *req)
1063 {
1064 return atomic_read(&req->rq_pin) != 0;
1065 }
1066
1067 /**
1068 * xprt_pin_rqst - Pin a request on the transport receive list
1069 * @req: Request to pin
1070 *
1071 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
1072 * so should be holding xprt->queue_lock.
1073 */
1074 void xprt_pin_rqst(struct rpc_rqst *req)
1075 {
1076 atomic_inc(&req->rq_pin);
1077 }
1078 EXPORT_SYMBOL_GPL(xprt_pin_rqst);
1079
1080 /**
1081 * xprt_unpin_rqst - Unpin a request on the transport receive list
1082  * @req: Request to unpin
1083 *
1084 * Caller should be holding xprt->queue_lock.
1085 */
1086 void xprt_unpin_rqst(struct rpc_rqst *req)
1087 {
1088 if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
1089 atomic_dec(&req->rq_pin);
1090 return;
1091 }
1092 if (atomic_dec_and_test(&req->rq_pin))
1093 wake_up_var(&req->rq_pin);
1094 }
1095 EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
1096
1097 static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
1098 {
1099 wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
1100 }
1101
1102 static bool
1103 xprt_request_data_received(struct rpc_task *task)
1104 {
1105 return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1106 READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
1107 }
1108
1109 static bool
1110 xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
1111 {
1112 return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1113 READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
1114 }
1115
1116 /**
1117  * xprt_request_enqueue_receive - Add a request to the receive queue
1118 * @task: RPC task
1119 *
1120 */
1121 void
1122 xprt_request_enqueue_receive(struct rpc_task *task)
1123 {
1124 struct rpc_rqst *req = task->tk_rqstp;
1125 struct rpc_xprt *xprt = req->rq_xprt;
1126
1127 if (!xprt_request_need_enqueue_receive(task, req))
1128 return;
1129
1130 xprt_request_prepare(task->tk_rqstp);
1131 spin_lock(&xprt->queue_lock);
1132
1133 /* Update the softirq receive buffer */
1134 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1135 sizeof(req->rq_private_buf));
1136
1137 /* Add request to the receive list */
1138 xprt_request_rb_insert(xprt, req);
1139 set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1140 spin_unlock(&xprt->queue_lock);
1141
1142 /* Turn off autodisconnect */
1143 del_singleshot_timer_sync(&xprt->timer);
1144 }
1145
1146 /**
1147 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1148 * @task: RPC task
1149 *
1150 * Caller must hold xprt->queue_lock.
1151 */
1152 static void
1153 xprt_request_dequeue_receive_locked(struct rpc_task *task)
1154 {
1155 struct rpc_rqst *req = task->tk_rqstp;
1156
1157 if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1158 xprt_request_rb_remove(req->rq_xprt, req);
1159 }
1160
1161 /**
1162 * xprt_update_rtt - Update RPC RTT statistics
1163 * @task: RPC request that recently completed
1164 *
1165 * Caller holds xprt->queue_lock.
1166 */
1167 void xprt_update_rtt(struct rpc_task *task)
1168 {
1169 struct rpc_rqst *req = task->tk_rqstp;
1170 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1171 unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1172 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1173
1174 if (timer) {
1175 if (req->rq_ntrans == 1)
1176 rpc_update_rtt(rtt, timer, m);
1177 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1178 }
1179 }
1180 EXPORT_SYMBOL_GPL(xprt_update_rtt);
1181
1182 /**
1183 * xprt_complete_rqst - called when reply processing is complete
1184 * @task: RPC request that recently completed
1185 * @copied: actual number of bytes received from the transport
1186 *
1187 * Caller holds xprt->queue_lock.
1188 */
1189 void xprt_complete_rqst(struct rpc_task *task, int copied)
1190 {
1191 struct rpc_rqst *req = task->tk_rqstp;
1192 struct rpc_xprt *xprt = req->rq_xprt;
1193
1194 xprt->stat.recvs++;
1195
1196 req->rq_private_buf.len = copied;
1197 /* Ensure all writes are done before we update */
1198 /* req->rq_reply_bytes_recvd */
1199 smp_wmb();
1200 req->rq_reply_bytes_recvd = copied;
1201 xprt_request_dequeue_receive_locked(task);
1202 rpc_wake_up_queued_task(&xprt->pending, task);
1203 }
1204 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
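
/*
 * A sketch of how a transport's receive path strings these helpers
 * together (cf. the pattern in the socket transport's data_ready
 * path; names here are illustrative):
 */
#if 0
static void example_complete_reply(struct rpc_xprt *xprt, __be32 xid,
				   int copied)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req) {
		spin_unlock(&xprt->queue_lock);
		return;
	}
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);

	/* ... copy the reply into req->rq_private_buf, lock dropped ... */

	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
	spin_unlock(&xprt->queue_lock);
}
#endif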
1205
1206 static void xprt_timer(struct rpc_task *task)
1207 {
1208 struct rpc_rqst *req = task->tk_rqstp;
1209 struct rpc_xprt *xprt = req->rq_xprt;
1210
1211 if (task->tk_status != -ETIMEDOUT)
1212 return;
1213
1214 trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1215 if (!req->rq_reply_bytes_recvd) {
1216 if (xprt->ops->timer)
1217 xprt->ops->timer(xprt, task);
1218 } else
1219 task->tk_status = 0;
1220 }
1221
1222 /**
1223 * xprt_wait_for_reply_request_def - wait for reply
1224 * @task: pointer to rpc_task
1225 *
1226 * Set a request's retransmit timeout based on the transport's
1227  * default timeout parameters, and put the task to sleep on the
1228  * pending queue. Used by transports that don't adjust the
1229  * retransmit timeout based on round-trip time estimation.
1230 */
1231 void xprt_wait_for_reply_request_def(struct rpc_task *task)
1232 {
1233 struct rpc_rqst *req = task->tk_rqstp;
1234
1235 rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1236 xprt_request_timeout(req));
1237 }
1238 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1239
1240 /**
1241 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1242 * @task: pointer to rpc_task
1243 *
1244 * Set a request's retransmit timeout using the RTT estimator,
1245 * and put the task to sleep on the pending queue.
1246 */
1247 void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1248 {
1249 int timer = task->tk_msg.rpc_proc->p_timer;
1250 struct rpc_clnt *clnt = task->tk_client;
1251 struct rpc_rtt *rtt = clnt->cl_rtt;
1252 struct rpc_rqst *req = task->tk_rqstp;
1253 unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1254 unsigned long timeout;
1255
1256 timeout = rpc_calc_rto(rtt, timer);
1257 timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1258 if (timeout > max_timeout || timeout == 0)
1259 timeout = max_timeout;
1260 rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1261 jiffies + timeout);
1262 }
1263 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1264
1265 /**
1266 * xprt_request_wait_receive - wait for the reply to an RPC request
1267 * @task: RPC task about to send a request
1268 *
1269 */
1270 void xprt_request_wait_receive(struct rpc_task *task)
1271 {
1272 struct rpc_rqst *req = task->tk_rqstp;
1273 struct rpc_xprt *xprt = req->rq_xprt;
1274
1275 if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1276 return;
1277 /*
1278 * Sleep on the pending queue if we're expecting a reply.
1279 * The spinlock ensures atomicity between the test of
1280 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1281 */
1282 spin_lock(&xprt->queue_lock);
1283 if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1284 xprt->ops->wait_for_reply_request(task);
1285 /*
1286 * Send an extra queue wakeup call if the
1287 * connection was dropped in case the call to
1288 * rpc_sleep_on() raced.
1289 */
1290 if (xprt_request_retransmit_after_disconnect(task))
1291 rpc_wake_up_queued_task_set_status(&xprt->pending,
1292 task, -ENOTCONN);
1293 }
1294 spin_unlock(&xprt->queue_lock);
1295 }
1296
1297 static bool
1298 xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1299 {
1300 return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1301 }
1302
1303 /**
1304 * xprt_request_enqueue_transmit - queue a task for transmission
1305 * @task: pointer to rpc_task
1306 *
1307 * Add a task to the transmission queue.
1308 */
1309 void
1310 xprt_request_enqueue_transmit(struct rpc_task *task)
1311 {
1312 struct rpc_rqst *pos, *req = task->tk_rqstp;
1313 struct rpc_xprt *xprt = req->rq_xprt;
1314
1315 if (xprt_request_need_enqueue_transmit(task, req)) {
1316 req->rq_bytes_sent = 0;
1317 spin_lock(&xprt->queue_lock);
1318 /*
1319 * Requests that carry congestion control credits are added
1320 * to the head of the list to avoid starvation issues.
1321 */
1322 if (req->rq_cong) {
1323 xprt_clear_congestion_window_wait(xprt);
1324 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1325 if (pos->rq_cong)
1326 continue;
1327 /* Note: req is added _before_ pos */
1328 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1329 INIT_LIST_HEAD(&req->rq_xmit2);
1330 goto out;
1331 }
1332 } else if (RPC_IS_SWAPPER(task)) {
1333 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1334 if (pos->rq_cong || pos->rq_bytes_sent)
1335 continue;
1336 if (RPC_IS_SWAPPER(pos->rq_task))
1337 continue;
1338 /* Note: req is added _before_ pos */
1339 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1340 INIT_LIST_HEAD(&req->rq_xmit2);
1341 goto out;
1342 }
1343 } else if (!req->rq_seqno) {
1344 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1345 if (pos->rq_task->tk_owner != task->tk_owner)
1346 continue;
1347 list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1348 INIT_LIST_HEAD(&req->rq_xmit);
1349 goto out;
1350 }
1351 }
1352 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1353 INIT_LIST_HEAD(&req->rq_xmit2);
1354 out:
1355 atomic_long_inc(&xprt->xmit_queuelen);
1356 set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1357 spin_unlock(&xprt->queue_lock);
1358 }
1359 }
1360
1361 /**
1362 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1363 * @task: pointer to rpc_task
1364 *
1365 * Remove a task from the transmission queue
1366 * Caller must hold xprt->queue_lock
1367 */
1368 static void
1369 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1370 {
1371 struct rpc_rqst *req = task->tk_rqstp;
1372
1373 if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1374 return;
1375 if (!list_empty(&req->rq_xmit)) {
1376 list_del(&req->rq_xmit);
1377 if (!list_empty(&req->rq_xmit2)) {
1378 struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1379 struct rpc_rqst, rq_xmit2);
1380 list_del(&req->rq_xmit2);
1381 list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1382 }
1383 } else
1384 list_del(&req->rq_xmit2);
1385 atomic_long_dec(&req->rq_xprt->xmit_queuelen);
1386 }
1387
1388 /**
1389 * xprt_request_dequeue_transmit - remove a task from the transmission queue
1390 * @task: pointer to rpc_task
1391 *
1392 * Remove a task from the transmission queue
1393 */
1394 static void
1395 xprt_request_dequeue_transmit(struct rpc_task *task)
1396 {
1397 struct rpc_rqst *req = task->tk_rqstp;
1398 struct rpc_xprt *xprt = req->rq_xprt;
1399
1400 spin_lock(&xprt->queue_lock);
1401 xprt_request_dequeue_transmit_locked(task);
1402 spin_unlock(&xprt->queue_lock);
1403 }
1404
1405 /**
1406 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1407 * @task: pointer to rpc_task
1408 *
1409 * Remove a task from the transmit and receive queues, and ensure that
1410 * it is not pinned by the receive work item.
1411 */
1412 void
1413 xprt_request_dequeue_xprt(struct rpc_task *task)
1414 {
1415 struct rpc_rqst *req = task->tk_rqstp;
1416 struct rpc_xprt *xprt = req->rq_xprt;
1417
1418 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1419 test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1420 xprt_is_pinned_rqst(req)) {
1421 spin_lock(&xprt->queue_lock);
1422 xprt_request_dequeue_transmit_locked(task);
1423 xprt_request_dequeue_receive_locked(task);
1424 while (xprt_is_pinned_rqst(req)) {
1425 set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1426 spin_unlock(&xprt->queue_lock);
1427 xprt_wait_on_pinned_rqst(req);
1428 spin_lock(&xprt->queue_lock);
1429 clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1430 }
1431 spin_unlock(&xprt->queue_lock);
1432 }
1433 }
1434
1435 /**
1436 * xprt_request_prepare - prepare an encoded request for transport
1437 * @req: pointer to rpc_rqst
1438 *
1439 * Calls into the transport layer to do whatever is needed to prepare
1440 * the request for transmission or receive.
1441 */
1442 void
1443 xprt_request_prepare(struct rpc_rqst *req)
1444 {
1445 struct rpc_xprt *xprt = req->rq_xprt;
1446
1447 if (xprt->ops->prepare_request)
1448 xprt->ops->prepare_request(req);
1449 }
1450
1451 /**
1452 * xprt_request_need_retransmit - Test if a task needs retransmission
1453 * @task: pointer to rpc_task
1454 *
1455 * Test for whether a connection breakage requires the task to retransmit
1456 */
1457 bool
1458 xprt_request_need_retransmit(struct rpc_task *task)
1459 {
1460 return xprt_request_retransmit_after_disconnect(task);
1461 }
1462
1463 /**
1464 * xprt_prepare_transmit - reserve the transport before sending a request
1465 * @task: RPC task about to send a request
1466 *
1467 */
1468 bool xprt_prepare_transmit(struct rpc_task *task)
1469 {
1470 struct rpc_rqst *req = task->tk_rqstp;
1471 struct rpc_xprt *xprt = req->rq_xprt;
1472
1473 if (!xprt_lock_write(xprt, task)) {
1474                 /* Race breaker: someone else may have transmitted the request */
1475 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1476 rpc_wake_up_queued_task_set_status(&xprt->sending,
1477 task, 0);
1478 return false;
1479
1480 }
1481 return true;
1482 }
1483
1484 void xprt_end_transmit(struct rpc_task *task)
1485 {
1486 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1487
1488 xprt_inject_disconnect(xprt);
1489 xprt_release_write(xprt, task);
1490 }
1491
1492 /**
1493 * xprt_request_transmit - send an RPC request on a transport
1494 * @req: pointer to request to transmit
1495 * @snd_task: RPC task that owns the transport lock
1496 *
1497 * This performs the transmission of a single request.
1498 * Note that if the request is not the same as snd_task, then it
1499 * does need to be pinned.
1500 * Returns '0' on success.
1501 */
1502 static int
1503 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1504 {
1505 struct rpc_xprt *xprt = req->rq_xprt;
1506 struct rpc_task *task = req->rq_task;
1507 unsigned int connect_cookie;
1508 int is_retrans = RPC_WAS_SENT(task);
1509 int status;
1510
1511 if (!req->rq_bytes_sent) {
1512 if (xprt_request_data_received(task)) {
1513 status = 0;
1514 goto out_dequeue;
1515 }
1516 /* Verify that our message lies in the RPCSEC_GSS window */
1517 if (rpcauth_xmit_need_reencode(task)) {
1518 status = -EBADMSG;
1519 goto out_dequeue;
1520 }
1521 if (RPC_SIGNALLED(task)) {
1522 status = -ERESTARTSYS;
1523 goto out_dequeue;
1524 }
1525 }
1526
1527 /*
1528 * Update req->rq_ntrans before transmitting to avoid races with
1529 * xprt_update_rtt(), which needs to know that it is recording a
1530 * reply to the first transmission.
1531 */
1532 req->rq_ntrans++;
1533
1534 trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1535 connect_cookie = xprt->connect_cookie;
1536 status = xprt->ops->send_request(req);
1537 if (status != 0) {
1538 req->rq_ntrans--;
1539 trace_xprt_transmit(req, status);
1540 return status;
1541 }
1542
1543 if (is_retrans) {
1544 task->tk_client->cl_stats->rpcretrans++;
1545 trace_xprt_retransmit(req);
1546 }
1547
1548 xprt_inject_disconnect(xprt);
1549
1550 task->tk_flags |= RPC_TASK_SENT;
1551 spin_lock(&xprt->transport_lock);
1552
1553 xprt->stat.sends++;
1554 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1555 xprt->stat.bklog_u += xprt->backlog.qlen;
1556 xprt->stat.sending_u += xprt->sending.qlen;
1557 xprt->stat.pending_u += xprt->pending.qlen;
1558 spin_unlock(&xprt->transport_lock);
1559
1560 req->rq_connect_cookie = connect_cookie;
1561 out_dequeue:
1562 trace_xprt_transmit(req, status);
1563 xprt_request_dequeue_transmit(task);
1564 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1565 return status;
1566 }
1567
1568 /**
1569 * xprt_transmit - send an RPC request on a transport
1570 * @task: controlling RPC task
1571 *
1572 * Attempts to drain the transmit queue. On exit, either the transport
1573 * signalled an error that needs to be handled before transmission can
1574 * resume, or @task finished transmitting, and detected that it already
1575 * received a reply.
1576 */
1577 void
1578 xprt_transmit(struct rpc_task *task)
1579 {
1580 struct rpc_rqst *next, *req = task->tk_rqstp;
1581 struct rpc_xprt *xprt = req->rq_xprt;
1582 int counter, status;
1583
1584 spin_lock(&xprt->queue_lock);
1585 counter = 0;
1586 while (!list_empty(&xprt->xmit_queue)) {
1587 if (++counter == 20)
1588 break;
1589 next = list_first_entry(&xprt->xmit_queue,
1590 struct rpc_rqst, rq_xmit);
1591 xprt_pin_rqst(next);
1592 spin_unlock(&xprt->queue_lock);
1593 status = xprt_request_transmit(next, task);
1594 if (status == -EBADMSG && next != req)
1595 status = 0;
1596 spin_lock(&xprt->queue_lock);
1597 xprt_unpin_rqst(next);
1598 if (status == 0) {
1599 if (!xprt_request_data_received(task) ||
1600 test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1601 continue;
1602 } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1603 task->tk_status = status;
1604 break;
1605 }
1606 spin_unlock(&xprt->queue_lock);
1607 }
1608
1609 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1610 {
1611 set_bit(XPRT_CONGESTED, &xprt->state);
1612 rpc_sleep_on(&xprt->backlog, task, NULL);
1613 }
1614
1615 static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
1616 {
1617 if (rpc_wake_up_next(&xprt->backlog) == NULL)
1618 clear_bit(XPRT_CONGESTED, &xprt->state);
1619 }
1620
1621 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1622 {
1623 bool ret = false;
1624
1625 if (!test_bit(XPRT_CONGESTED, &xprt->state))
1626 goto out;
1627 spin_lock(&xprt->reserve_lock);
1628 if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1629 rpc_sleep_on(&xprt->backlog, task, NULL);
1630 ret = true;
1631 }
1632 spin_unlock(&xprt->reserve_lock);
1633 out:
1634 return ret;
1635 }
1636
1637 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1638 {
1639 struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1640
1641 if (xprt->num_reqs >= xprt->max_reqs)
1642 goto out;
1643 ++xprt->num_reqs;
1644 spin_unlock(&xprt->reserve_lock);
1645 req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
1646 spin_lock(&xprt->reserve_lock);
1647 if (req != NULL)
1648 goto out;
1649 --xprt->num_reqs;
1650 req = ERR_PTR(-ENOMEM);
1651 out:
1652 return req;
1653 }
1654
1655 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1656 {
1657 if (xprt->num_reqs > xprt->min_reqs) {
1658 --xprt->num_reqs;
1659 kfree(req);
1660 return true;
1661 }
1662 return false;
1663 }
1664
1665 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1666 {
1667 struct rpc_rqst *req;
1668
1669 spin_lock(&xprt->reserve_lock);
1670 if (!list_empty(&xprt->free)) {
1671 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1672 list_del(&req->rq_list);
1673 goto out_init_req;
1674 }
1675 req = xprt_dynamic_alloc_slot(xprt);
1676 if (!IS_ERR(req))
1677 goto out_init_req;
1678 switch (PTR_ERR(req)) {
1679 case -ENOMEM:
1680 dprintk("RPC: dynamic allocation of request slot "
1681 "failed! Retrying\n");
1682 task->tk_status = -ENOMEM;
1683 break;
1684 case -EAGAIN:
1685 xprt_add_backlog(xprt, task);
1686 dprintk("RPC: waiting for request slot\n");
1687 fallthrough;
1688 default:
1689 task->tk_status = -EAGAIN;
1690 }
1691 spin_unlock(&xprt->reserve_lock);
1692 return;
1693 out_init_req:
1694 xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1695 xprt->num_reqs);
1696 spin_unlock(&xprt->reserve_lock);
1697
1698 task->tk_status = 0;
1699 task->tk_rqstp = req;
1700 }
1701 EXPORT_SYMBOL_GPL(xprt_alloc_slot);
1702
1703 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1704 {
1705 spin_lock(&xprt->reserve_lock);
1706 if (!xprt_dynamic_free_slot(xprt, req)) {
1707 memset(req, 0, sizeof(*req)); /* mark unused */
1708 list_add(&req->rq_list, &xprt->free);
1709 }
1710 xprt_wake_up_backlog(xprt);
1711 spin_unlock(&xprt->reserve_lock);
1712 }
1713 EXPORT_SYMBOL_GPL(xprt_free_slot);
1714
1715 static void xprt_free_all_slots(struct rpc_xprt *xprt)
1716 {
1717 struct rpc_rqst *req;
1718 while (!list_empty(&xprt->free)) {
1719 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1720 list_del(&req->rq_list);
1721 kfree(req);
1722 }
1723 }
1724
1725 struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1726 unsigned int num_prealloc,
1727 unsigned int max_alloc)
1728 {
1729 struct rpc_xprt *xprt;
1730 struct rpc_rqst *req;
1731 int i;
1732
1733 xprt = kzalloc(size, GFP_KERNEL);
1734 if (xprt == NULL)
1735 goto out;
1736
1737 xprt_init(xprt, net);
1738
1739 for (i = 0; i < num_prealloc; i++) {
1740 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1741 if (!req)
1742 goto out_free;
1743 list_add(&req->rq_list, &xprt->free);
1744 }
1745 if (max_alloc > num_prealloc)
1746 xprt->max_reqs = max_alloc;
1747 else
1748 xprt->max_reqs = num_prealloc;
1749 xprt->min_reqs = num_prealloc;
1750 xprt->num_reqs = num_prealloc;
1751
1752 return xprt;
1753
1754 out_free:
1755 xprt_free(xprt);
1756 out:
1757 return NULL;
1758 }
1759 EXPORT_SYMBOL_GPL(xprt_alloc);
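
/*
 * A sketch of a transport setup routine embedding struct rpc_xprt in
 * its private state (struct example_xprt and its layout are
 * illustrative; RPC_DEF_SLOT_TABLE and RPC_MAX_SLOT_TABLE come from
 * sunrpc/xprt.h):
 */
#if 0
struct example_xprt {
	struct rpc_xprt	xprt;	/* first, so the cast back is trivial */
	/* transport-private state follows */
};

static struct rpc_xprt *example_xprt_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
			  RPC_DEF_SLOT_TABLE, RPC_MAX_SLOT_TABLE);
	if (!xprt)
		return ERR_PTR(-ENOMEM);
	/* ... fill in ops, timeouts and addresses before returning ... */
	return xprt;
}
#endif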
1760
1761 void xprt_free(struct rpc_xprt *xprt)
1762 {
1763 put_net(xprt->xprt_net);
1764 xprt_free_all_slots(xprt);
1765 kfree_rcu(xprt, rcu);
1766 }
1767 EXPORT_SYMBOL_GPL(xprt_free);
1768
1769 static void
1770 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1771 {
1772 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1773 }
1774
1775 static __be32
1776 xprt_alloc_xid(struct rpc_xprt *xprt)
1777 {
1778 __be32 xid;
1779
1780 spin_lock(&xprt->reserve_lock);
1781 xid = (__force __be32)xprt->xid++;
1782 spin_unlock(&xprt->reserve_lock);
1783 return xid;
1784 }
1785
1786 static void
1787 xprt_init_xid(struct rpc_xprt *xprt)
1788 {
1789 xprt->xid = prandom_u32();
1790 }
1791
1792 static void
1793 xprt_request_init(struct rpc_task *task)
1794 {
1795 struct rpc_xprt *xprt = task->tk_xprt;
1796 struct rpc_rqst *req = task->tk_rqstp;
1797
1798 req->rq_task = task;
1799 req->rq_xprt = xprt;
1800 req->rq_buffer = NULL;
1801 req->rq_xid = xprt_alloc_xid(xprt);
1802 xprt_init_connect_cookie(req, xprt);
1803 req->rq_snd_buf.len = 0;
1804 req->rq_snd_buf.buflen = 0;
1805 req->rq_rcv_buf.len = 0;
1806 req->rq_rcv_buf.buflen = 0;
1807 req->rq_snd_buf.bvec = NULL;
1808 req->rq_rcv_buf.bvec = NULL;
1809 req->rq_release_snd_buf = NULL;
1810 xprt_init_majortimeo(task, req);
1811
1812 trace_xprt_reserve(req);
1813 }
1814
1815 static void
1816 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1817 {
1818 xprt->ops->alloc_slot(xprt, task);
1819 if (task->tk_rqstp != NULL)
1820 xprt_request_init(task);
1821 }
1822
1823 /**
1824 * xprt_reserve - allocate an RPC request slot
1825 * @task: RPC task requesting a slot allocation
1826 *
1827 * If the transport is marked as being congested, or if no more
1828 * slots are available, place the task on the transport's
1829 * backlog queue.
1830 */
1831 void xprt_reserve(struct rpc_task *task)
1832 {
1833 struct rpc_xprt *xprt = task->tk_xprt;
1834
1835 task->tk_status = 0;
1836 if (task->tk_rqstp != NULL)
1837 return;
1838
1839 task->tk_status = -EAGAIN;
1840 if (!xprt_throttle_congested(xprt, task))
1841 xprt_do_reserve(xprt, task);
1842 }
1843
1844 /**
1845 * xprt_retry_reserve - allocate an RPC request slot
1846 * @task: RPC task requesting a slot allocation
1847 *
1848 * If no more slots are available, place the task on the transport's
1849 * backlog queue.
1850 * Note that the only difference with xprt_reserve is that we now
1851 * ignore the value of the XPRT_CONGESTED flag.
1852 */
1853 void xprt_retry_reserve(struct rpc_task *task)
1854 {
1855 struct rpc_xprt *xprt = task->tk_xprt;
1856
1857 task->tk_status = 0;
1858 if (task->tk_rqstp != NULL)
1859 return;
1860
1861 task->tk_status = -EAGAIN;
1862 xprt_do_reserve(xprt, task);
1863 }
1864
1865 /**
1866 * xprt_release - release an RPC request slot
1867 * @task: task which is finished with the slot
1868 *
1869 */
1870 void xprt_release(struct rpc_task *task)
1871 {
1872 struct rpc_xprt *xprt;
1873 struct rpc_rqst *req = task->tk_rqstp;
1874
1875 if (req == NULL) {
1876 if (task->tk_client) {
1877 xprt = task->tk_xprt;
1878 xprt_release_write(xprt, task);
1879 }
1880 return;
1881 }
1882
1883 xprt = req->rq_xprt;
1884 xprt_request_dequeue_xprt(task);
1885 spin_lock(&xprt->transport_lock);
1886 xprt->ops->release_xprt(xprt, task);
1887 if (xprt->ops->release_request)
1888 xprt->ops->release_request(task);
1889 xprt_schedule_autodisconnect(xprt);
1890 spin_unlock(&xprt->transport_lock);
1891 if (req->rq_buffer)
1892 xprt->ops->buf_free(task);
1893 xdr_free_bvec(&req->rq_rcv_buf);
1894 xdr_free_bvec(&req->rq_snd_buf);
1895 if (req->rq_cred != NULL)
1896 put_rpccred(req->rq_cred);
1897 task->tk_rqstp = NULL;
1898 if (req->rq_release_snd_buf)
1899 req->rq_release_snd_buf(req);
1900
1901 if (likely(!bc_prealloc(req)))
1902 xprt->ops->free_slot(xprt, req);
1903 else
1904 xprt_free_bc_request(req);
1905 }
1906
1907 #ifdef CONFIG_SUNRPC_BACKCHANNEL
1908 void
1909 xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1910 {
1911 struct xdr_buf *xbufp = &req->rq_snd_buf;
1912
1913 task->tk_rqstp = req;
1914 req->rq_task = task;
1915 xprt_init_connect_cookie(req, req->rq_xprt);
1916 /*
1917 * Set up the xdr_buf length.
1918 * This also indicates that the buffer is XDR encoded already.
1919 */
1920 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1921 xbufp->tail[0].iov_len;
1922 }
1923 #endif
1924
1925 static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1926 {
1927 kref_init(&xprt->kref);
1928
1929 spin_lock_init(&xprt->transport_lock);
1930 spin_lock_init(&xprt->reserve_lock);
1931 spin_lock_init(&xprt->queue_lock);
1932
1933 INIT_LIST_HEAD(&xprt->free);
1934 xprt->recv_queue = RB_ROOT;
1935 INIT_LIST_HEAD(&xprt->xmit_queue);
1936 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1937 spin_lock_init(&xprt->bc_pa_lock);
1938 INIT_LIST_HEAD(&xprt->bc_pa_list);
1939 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1940 INIT_LIST_HEAD(&xprt->xprt_switch);
1941
1942 xprt->last_used = jiffies;
1943 xprt->cwnd = RPC_INITCWND;
1944 xprt->bind_index = 0;
1945
1946 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1947 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1948 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1949 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1950
1951 xprt_init_xid(xprt);
1952
1953 xprt->xprt_net = get_net(net);
1954 }
1955
1956 /**
1957 * xprt_create_transport - create an RPC transport
1958 * @args: rpc transport creation arguments
1959 *
1960 */
1961 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1962 {
1963 struct rpc_xprt *xprt;
1964 const struct xprt_class *t;
1965
1966 t = xprt_class_find_by_ident(args->ident);
1967 if (!t) {
1968 dprintk("RPC: transport (%d) not supported\n", args->ident);
1969 return ERR_PTR(-EIO);
1970 }
1971
1972 xprt = t->setup(args);
1973 xprt_class_release(t);
1974
1975 if (IS_ERR(xprt))
1976 goto out;
1977 if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1978 xprt->idle_timeout = 0;
1979 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1980 if (xprt_has_timer(xprt))
1981 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1982 else
1983 timer_setup(&xprt->timer, NULL, 0);
1984
1985 if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1986 xprt_destroy(xprt);
1987 return ERR_PTR(-EINVAL);
1988 }
1989 xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1990 if (xprt->servername == NULL) {
1991 xprt_destroy(xprt);
1992 return ERR_PTR(-ENOMEM);
1993 }
1994
1995 rpc_xprt_debugfs_register(xprt);
1996
1997 trace_xprt_create(xprt);
1998 out:
1999 return xprt;
2000 }
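
/*
 * Putting the pieces together: a sketch of creating a transport from
 * a netid (the function name, server name and address arguments are
 * placeholders):
 */
#if 0
static struct rpc_xprt *example_create_xprt(struct net *net,
					    struct sockaddr *sap,
					    size_t salen)
{
	struct xprt_create args = {
		.net		= net,
		.ident		= xprt_find_transport_ident("tcp"),
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= "server.example.net",
	};

	if (args.ident < 0)
		return ERR_PTR(args.ident);
	return xprt_create_transport(&args);
}
#endif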
2001
2002 static void xprt_destroy_cb(struct work_struct *work)
2003 {
2004 struct rpc_xprt *xprt =
2005 container_of(work, struct rpc_xprt, task_cleanup);
2006
2007 trace_xprt_destroy(xprt);
2008
2009 rpc_xprt_debugfs_unregister(xprt);
2010 rpc_destroy_wait_queue(&xprt->binding);
2011 rpc_destroy_wait_queue(&xprt->pending);
2012 rpc_destroy_wait_queue(&xprt->sending);
2013 rpc_destroy_wait_queue(&xprt->backlog);
2014 kfree(xprt->servername);
2015 /*
2016 * Destroy any existing back channel
2017 */
2018 xprt_destroy_backchannel(xprt, UINT_MAX);
2019
2020 /*
2021 * Tear down transport state and free the rpc_xprt
2022 */
2023 xprt->ops->destroy(xprt);
2024 }
2025
2026 /**
2027 * xprt_destroy - destroy an RPC transport, killing off all requests.
2028 * @xprt: transport to destroy
2029 *
2030 */
2031 static void xprt_destroy(struct rpc_xprt *xprt)
2032 {
2033 /*
2034 * Exclude transport connect/disconnect handlers and autoclose
2035 */
2036 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
2037
2038 del_timer_sync(&xprt->timer);
2039
2040 /*
2041 * Destroy sockets etc from the system workqueue so they can
2042 * safely flush receive work running on rpciod.
2043 */
2044 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
2045 schedule_work(&xprt->task_cleanup);
2046 }
2047
2048 static void xprt_destroy_kref(struct kref *kref)
2049 {
2050 xprt_destroy(container_of(kref, struct rpc_xprt, kref));
2051 }
2052
2053 /**
2054 * xprt_get - return a reference to an RPC transport.
2055 * @xprt: pointer to the transport
2056 *
2057 */
2058 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
2059 {
2060 if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
2061 return xprt;
2062 return NULL;
2063 }
2064 EXPORT_SYMBOL_GPL(xprt_get);
2065
2066 /**
2067 * xprt_put - release a reference to an RPC transport.
2068 * @xprt: pointer to the transport
2069 *
2070 */
2071 void xprt_put(struct rpc_xprt *xprt)
2072 {
2073 if (xprt != NULL)
2074 kref_put(&xprt->kref, xprt_destroy_kref);
2075 }
2076 EXPORT_SYMBOL_GPL(xprt_put);