// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
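
/*
 * Example (illustrative sketch, not part of this file): a transport
 * module typically registers itself from its module init hook by
 * filling in a struct xprt_class. The values below mirror the in-tree
 * TCP transport; xs_setup_tcp() lives in xprtsock.c.
 *
 *	static struct xprt_class xs_tcp_transport = {
 *		.list	= LIST_HEAD_INIT(xs_tcp_transport.list),
 *		.name	= "tcp",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= xs_setup_tcp,
 *	};
 *
 *	xprt_register_transport(&xs_tcp_transport);
 */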

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
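
/*
 * Example (illustrative): xprt_load_transport("rdma") first scans
 * xprt_list for an already-registered class named "rdma"; only if none
 * is found does it call request_module("xprtrdma"). The module name is
 * formed by prefixing "xprt" to the transport name, so a loadable
 * transport must be named accordingly or declare a matching
 * MODULE_ALIAS().
 */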

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
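
/*
 * Worked example (illustrative): with RPC_CWNDSCALE = 256 (its value
 * in sunrpc/xprt.h), a window of cwnd = 4 << 8 = 1024 grows on a
 * timely reply by (256 * 256 + 512) / 1024 = 64, i.e. one quarter of
 * a credit: the classic additive increase of 1/cwnd. A timeout
 * instead halves the window (multiplicative decrease), never dropping
 * below RPC_CWNDSCALE.
 */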

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
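
/*
 * Worked example (illustrative): with to_initval = 5 * HZ,
 * to_retries = 3 and to_exponential set, the major timeout is
 * 5s << 3 = 40s (capped at to_maxval); with linear growth it would be
 * 5s + 3 * to_increment. Once it elapses, xprt_adjust_timeout() below
 * returns -ETIMEDOUT and rearms the next major timeout.
 */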

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);

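/*
 * Worked example (illustrative): starting from a 3 second reestablish
 * timeout, successive calls back it off to 6s, 12s, 24s, ... until
 * xprt->max_reconnect_timeout caps it, while @init_to keeps the value
 * from shrinking below the transport's initial setting.
 */
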
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
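
/*
 * Usage sketch (illustrative): a transport's receive path pairs the
 * lookup/pin/unpin helpers roughly like this, mirroring the pattern
 * used by the socket transports:
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);
 *		spin_unlock(&xprt->queue_lock);
 *		... copy the reply into req->rq_private_buf ...
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 */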

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue. Requests carrying congestion
 * control credits are queued ahead of uncredited ones, requests from
 * swapper tasks ahead of requests that have not yet been transmitted,
 * and requests without an rq_seqno are grouped behind an earlier
 * request from the same tk_owner so they can be transmitted together.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (task->tk_ops->rpc_call_prepare_transmit) {
			task->tk_ops->rpc_call_prepare_transmit(task,
					task->tk_calldata);
			status = task->tk_status;
			if (status < 0)
				goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
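
/*
 * Example (illustrative): a transport setup routine typically embeds
 * struct rpc_xprt inside its own private structure and sizes the slot
 * table here; the socket transports do roughly
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct sock_xprt),
 *			  slot_table_size, max_slot_table_size);
 *
 * where the two sizes come from module parameters. The variable names
 * are hypothetical.
 */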

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

static void
xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_all(task, req);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
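
/*
 * Example (illustrative): callers such as rpc_create() reach this
 * function with a filled-in struct xprt_create, roughly
 *
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *		.servername	= "server.example.com",
 *	};
 *	xprt = xprt_create_transport(&xprtargs);
 *
 * The variable names and server name above are hypothetical; the
 * fields match struct xprt_create.
 */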

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);