/*
 * linux/net/sunrpc/xprt.c
 *
 * This is a generic RPC call interface supporting congestion avoidance,
 * and asynchronous calls.
 *
 * The interface works like this:
 *
 * - When a process places a call, it allocates a request slot if
 *   one is available. Otherwise, it sleeps on the backlog queue
 *   (xprt_reserve).
 * - Next, the caller puts together the RPC message, stuffs it into
 *   the request struct, and calls xprt_transmit().
 * - xprt_transmit sends the message and installs the caller on the
 *   transport's wait list. At the same time, if a reply is expected,
 *   it installs a timer that is run after the packet's timeout has
 *   expired.
 * - When a packet arrives, the data_ready handler walks the list of
 *   pending requests for that transport. If a matching XID is found, the
 *   caller is woken up, and the timer removed.
 * - When no reply arrives within the timeout interval, the timer is
 *   fired by the kernel and runs xprt_timer(). It either adjusts the
 *   timeout values (minor timeout) or wakes up the caller with a status
 *   of -ETIMEDOUT.
 * - When the caller receives a notification from RPC that a reply arrived,
 *   it should release the RPC slot, and process the reply.
 *   If the call timed out, it may choose to retry the operation by
 *   adjusting the initial timeout value, and simply calling rpc_call
 *   again.
 *
 * Support for async RPC is done through a set of RPC-specific scheduling
 * primitives that `transparently' work for processes as well as async
 * tasks that rely on callbacks.
 *
 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
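
/*
 * Example (an illustrative sketch, not part of the original file): a
 * transport module would typically register its xprt_class from its
 * module_init hook and unregister it again on exit. The names
 * my_xprt_class and my_xprt_setup, and the ident value, are hypothetical:
 *
 *	static struct xprt_class my_xprt_class = {
 *		.list	= LIST_HEAD_INIT(my_xprt_class.list),
 *		.name	= "myxprt",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,	// a real module defines its own ident
 *		.setup	= my_xprt_setup,
 *	};
 *
 *	static int __init my_xprt_module_init(void)
 *	{
 *		return xprt_register_transport(&my_xprt_class);
 *	}
 *
 *	static void __exit my_xprt_module_exit(void)
 *	{
 *		xprt_unregister_transport(&my_xprt_class);
 *	}
 */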

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
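
/*
 * Illustrative usage sketch (a hypothetical caller, not code from this
 * file): a transport can be demand-loaded by name before it is looked up
 * again in xprt_list. The request_module() call above maps the name to a
 * module alias of the form "xprt<name>":
 *
 *	if (xprt_load_transport("rdma") != 0)
 *		return -ENOENT;	// module alias "xprtrdma" not available
 */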

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
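
/*
 * Illustrative sketch: a transport chooses between the plain and the
 * congestion-aware lock helpers by pairing them in its rpc_xprt_ops. A
 * datagram-style transport would typically use the *_cong variants, a
 * stream transport the plain ones (field names follow struct rpc_xprt_ops;
 * the ops-table name is hypothetical):
 *
 *	static struct rpc_xprt_ops my_udp_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt_cong,
 *		.release_xprt	= xprt_release_xprt_cong,
 *		...
 *	};
 */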

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
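
/*
 * Worked example of the additive increase above, assuming the usual
 * RPC_CWNDSCALE of 256: with cwnd = 1024 (four "slots" worth of window),
 * one successful reply adds
 *
 *	(256 * 256 + 512) / 1024 = 64
 *
 * i.e. roughly RPC_CWNDSCALE/cwnd per reply, so it takes about four round
 * trips to grow the window by one full slot. A timeout instead halves cwnd
 * (multiplicative decrease), but never below one slot (RPC_CWNDSCALE).
 */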

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
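
/*
 * Worked example (illustrative): if rpc_calc_rto() estimates an RTO of
 * HZ/2 for this procedure's timer class, the request has already been
 * retransmitted twice (rq_retries == 2) and no extra backoff has been
 * recorded (rpc_ntimeo() == 0), the next timeout is (HZ/2) << 2 = 2*HZ,
 * clamped to the client's to_maxval.
 */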

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
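
/*
 * Worked example (illustrative): with to_initval = 10 * HZ, to_retries = 3
 * and to_exponential set, a fresh request gets a major timeout of
 * (10 * HZ) << 3 = 80 seconds from now (clamped to to_maxval); with linear
 * backoff it would be 10 * HZ + 3 * to_increment instead. Only once this
 * major timeout expires does xprt_adjust_timeout() below give up on the
 * current backoff sequence and return -ETIMEDOUT.
 */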

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
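
/*
 * Illustrative receive-path sketch (an assumption about a typical transport
 * data_ready handler, not code from this file), consistent with the locking
 * comment above: match the reply XID against the pending list and complete
 * the request while holding the transport lock. rovr, xid and copied are
 * hypothetical locals:
 *
 *	spin_lock_bh(&xprt->transport_lock);
 *	rovr = xprt_lookup_rqst(xprt, xid);
 *	if (rovr != NULL)
 *		xprt_complete_rqst(rovr->rq_task, copied);
 *	spin_unlock_bh(&xprt->transport_lock);
 */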

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
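
/*
 * Illustrative sketch: xprt_alloc_slot() and xprt_lock_and_alloc_slot()
 * (below) are exported so a transport can plug one of them into the
 * ->alloc_slot method of its rpc_xprt_ops. A stream transport, which must
 * throttle slot allocation while reconnecting, would typically pick the
 * locking variant (an assumption about typical usage):
 *
 *	.alloc_slot	= xprt_lock_and_alloc_slot,
 */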

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
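
/*
 * Illustrative sketch (hypothetical setup routine): a transport's ->setup
 * method typically embeds struct rpc_xprt in a private structure and sizes
 * the slot table via xprt_alloc(). struct my_xprt and the slot counts are
 * invented for the example:
 *
 *	static struct rpc_xprt *my_xprt_setup(struct xprt_create *args)
 *	{
 *		struct rpc_xprt *xprt;
 *
 *		xprt = xprt_alloc(args->net, sizeof(struct my_xprt), 16, 128);
 *		if (xprt == NULL)
 *			return ERR_PTR(-ENOMEM);
 *		// ...fill in ops, timeouts and addresses...
 *		return xprt;
 *	}
 */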

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
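
/*
 * Illustrative sketch: a caller (rpc_create() in the generic client is the
 * usual one) fills in struct xprt_create and hands it to
 * xprt_create_transport(). The values below are invented for the example:
 *
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *		.servername	= "nfs-server.example.org",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&xprtargs);
 *
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 */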

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/* Exclude transport connect/disconnect handlers */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}
EXPORT_SYMBOL_GPL(xprt_put);