[PATCH] RPC: expose API for serializing access to RPC transports
1 /*
2 * linux/net/sunrpc/xprt.c
3 *
4 * This is a generic RPC call interface supporting congestion avoidance,
5 * and asynchronous calls.
6 *
7 * The interface works like this:
8 *
9 * - When a process places a call, it allocates a request slot if
10 * one is available. Otherwise, it sleeps on the backlog queue
11 * (xprt_reserve).
12 * - Next, the caller puts together the RPC message, stuffs it into
13 * the request struct, and calls xprt_transmit().
14 * - xprt_transmit sends the message and installs the caller on the
15 * transport's wait list. At the same time, it installs a timer that
16 * is run after the packet's timeout has expired.
17 * - When a packet arrives, the data_ready handler walks the list of
18 * pending requests for that transport. If a matching XID is found, the
19 * caller is woken up, and the timer removed.
20 * - When no reply arrives within the timeout interval, the timer is
21 * fired by the kernel and runs xprt_timer(). It either adjusts the
22 * timeout values (minor timeout) or wakes up the caller with a status
23 * of -ETIMEDOUT.
24 * - When the caller receives a notification from RPC that a reply arrived,
25 * it should release the RPC slot, and process the reply.
26 * If the call timed out, it may choose to retry the operation by
27 * adjusting the initial timeout value, and simply calling rpc_call
28 * again.
29 *
30 * Support for async RPC is done through a set of RPC-specific scheduling
31 * primitives that `transparently' work for processes as well as async
32 * tasks that rely on callbacks.
33 *
34 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
35 *
36 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
37 */
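/*
 * Illustrative sketch, not part of the upstream file: the flow described
 * above maps onto the entry points defined below.  The real call sites
 * live in the RPC client state machine (assumed to be net/sunrpc/clnt.c);
 * this is only meant as a reading guide:
 *
 *	xprt_reserve(task);		- allocate a request slot, or sleep
 *					  on the backlog queue
 *	xprt_prepare_transmit(task);	- serialize access to the transport
 *	xprt_transmit(task);		- send the request, queue it on
 *					  xprt->recv, start the retransmit timer
 *	(reply arrives)			- transport calls xprt_lookup_rqst() and
 *					  xprt_complete_rqst() to wake the task
 *	(no reply in time)		- xprt_timer() wakes the task with -ETIMEDOUT
 *	xprt_release(task);		- return the slot and wake the backlog
 */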
38
39 #include <linux/module.h>
40
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/workqueue.h>
44 #include <linux/random.h>
45
46 #include <linux/sunrpc/clnt.h>
47
48 /*
49 * Local variables
50 */
51
52 #ifdef RPC_DEBUG
53 # undef RPC_DEBUG_DATA
54 # define RPCDBG_FACILITY RPCDBG_XPRT
55 #endif
56
57 /*
58 * Local functions
59 */
60 static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
61 static inline void do_xprt_reserve(struct rpc_task *);
62 static void xprt_connect_status(struct rpc_task *task);
63 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
64
65 static int xprt_clear_backlog(struct rpc_xprt *xprt);
66
67 /**
68 * xprt_reserve_xprt - serialize write access to transports
69 * @task: task that is requesting access to the transport
70 *
71 * This prevents mixing the payload of separate requests, and prevents
72 * transport connects from colliding with writes. No congestion control
73 * is provided.
74 */
75 int xprt_reserve_xprt(struct rpc_task *task)
76 {
77 struct rpc_xprt *xprt = task->tk_xprt;
78 struct rpc_rqst *req = task->tk_rqstp;
79
80 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
81 if (task == xprt->snd_task)
82 return 1;
83 if (task == NULL)
84 return 0;
85 goto out_sleep;
86 }
87 xprt->snd_task = task;
88 if (req) {
89 req->rq_bytes_sent = 0;
90 req->rq_ntrans++;
91 }
92 return 1;
93
94 out_sleep:
95 dprintk("RPC: %4d failed to lock transport %p\n",
96 task->tk_pid, xprt);
97 task->tk_timeout = 0;
98 task->tk_status = -EAGAIN;
99 if (req && req->rq_ntrans)
100 rpc_sleep_on(&xprt->resend, task, NULL, NULL);
101 else
102 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
103 return 0;
104 }
105
106 /**
107 * xprt_reserve_xprt_cong - serialize write access to transports
108 * @task: task that is requesting access to the transport
109 *
110 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
111 * integrated into the decision of whether a request is allowed to be
112 * woken up and given access to the transport.
113 */
114 int xprt_reserve_xprt_cong(struct rpc_task *task)
115 {
116 struct rpc_xprt *xprt = task->tk_xprt;
117 struct rpc_rqst *req = task->tk_rqstp;
118
119 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
120 if (task == xprt->snd_task)
121 return 1;
122 goto out_sleep;
123 }
124 if (__xprt_get_cong(xprt, task)) {
125 xprt->snd_task = task;
126 if (req) {
127 req->rq_bytes_sent = 0;
128 req->rq_ntrans++;
129 }
130 return 1;
131 }
132 smp_mb__before_clear_bit();
133 clear_bit(XPRT_LOCKED, &xprt->state);
134 smp_mb__after_clear_bit();
135 out_sleep:
136 dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
137 task->tk_timeout = 0;
138 task->tk_status = -EAGAIN;
139 if (req && req->rq_ntrans)
140 rpc_sleep_on(&xprt->resend, task, NULL, NULL);
141 else
142 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
143 return 0;
144 }
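/*
 * Illustrative sketch, not part of the upstream file: a transport backend
 * chooses one of the two reservation strategies above when it fills in its
 * rpc_xprt_ops.  The variable name below is made up; the fields shown are
 * the ones this file actually calls through xprt->ops:
 *
 *	static struct rpc_xprt_ops example_datagram_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt_cong,
 *		.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
 *		.connect		= ...,
 *		.send_request		= ...,
 *		.close			= ...,
 *		.destroy		= ...,
 *	};
 *
 * A stream transport, where the network layer already provides congestion
 * control, would typically plug in xprt_reserve_xprt and
 * xprt_set_retrans_timeout_def instead.
 */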
145
146 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
147 {
148 int retval;
149
150 spin_lock_bh(&xprt->transport_lock);
151 retval = xprt->ops->reserve_xprt(task);
152 spin_unlock_bh(&xprt->transport_lock);
153 return retval;
154 }
155
156
157 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
158 {
159 struct rpc_task *task;
160
161 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
162 return;
163 if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
164 goto out_unlock;
165 task = rpc_wake_up_next(&xprt->resend);
166 if (!task) {
167 task = rpc_wake_up_next(&xprt->sending);
168 if (!task)
169 goto out_unlock;
170 }
171 if (xprt->nocong || __xprt_get_cong(xprt, task)) {
172 struct rpc_rqst *req = task->tk_rqstp;
173 xprt->snd_task = task;
174 if (req) {
175 req->rq_bytes_sent = 0;
176 req->rq_ntrans++;
177 }
178 return;
179 }
180 out_unlock:
181 smp_mb__before_clear_bit();
182 clear_bit(XPRT_LOCKED, &xprt->state);
183 smp_mb__after_clear_bit();
184 }
185
186 /*
187 * Releases the transport for use by other requests.
188 */
189 static void
190 __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
191 {
192 if (xprt->snd_task == task) {
193 xprt->snd_task = NULL;
194 smp_mb__before_clear_bit();
195 clear_bit(XPRT_LOCKED, &xprt->state);
196 smp_mb__after_clear_bit();
197 __xprt_lock_write_next(xprt);
198 }
199 }
200
201 static inline void
202 xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
203 {
204 spin_lock_bh(&xprt->transport_lock);
205 __xprt_release_write(xprt, task);
206 spin_unlock_bh(&xprt->transport_lock);
207 }
208
209 /*
210 * Van Jacobson congestion avoidance. Check if the congestion window
211 * overflowed. Put the task to sleep if this is the case.
212 */
213 static int
214 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
215 {
216 struct rpc_rqst *req = task->tk_rqstp;
217
218 if (req->rq_cong)
219 return 1;
220 dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
221 task->tk_pid, xprt->cong, xprt->cwnd);
222 if (RPCXPRT_CONGESTED(xprt))
223 return 0;
224 req->rq_cong = 1;
225 xprt->cong += RPC_CWNDSCALE;
226 return 1;
227 }
228
229 /*
230 * Adjust the congestion window, and wake up the next task
231 * that has been sleeping due to congestion
232 */
233 static void
234 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
235 {
236 if (!req->rq_cong)
237 return;
238 req->rq_cong = 0;
239 xprt->cong -= RPC_CWNDSCALE;
240 __xprt_lock_write_next(xprt);
241 }
242
243 /*
244 * Adjust RPC congestion window
245 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
246 */
247 static void
248 xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
249 {
250 unsigned long cwnd;
251
252 cwnd = xprt->cwnd;
253 if (result >= 0 && cwnd <= xprt->cong) {
254 /* The (cwnd >> 1) term makes sure
255 * the result gets rounded properly. */
256 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
257 if (cwnd > RPC_MAXCWND(xprt))
258 cwnd = RPC_MAXCWND(xprt);
259 __xprt_lock_write_next(xprt);
260 } else if (result == -ETIMEDOUT) {
261 cwnd >>= 1;
262 if (cwnd < RPC_CWNDSCALE)
263 cwnd = RPC_CWNDSCALE;
264 }
265 dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
266 xprt->cong, xprt->cwnd, cwnd);
267 xprt->cwnd = cwnd;
268 }
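/*
 * Worked example, for illustration only (it assumes RPC_CWNDSCALE
 * represents one request's worth of congestion credit and RPC_MAXCWND
 * caps the window at the number of request slots): with
 * cwnd == 2 * RPC_CWNDSCALE, each timely reply grows the window by about
 * RPC_CWNDSCALE * RPC_CWNDSCALE / cwnd, i.e. half a credit, so roughly a
 * full window of replies is needed to earn one more in-flight request
 * (additive increase).  A timeout halves cwnd, but never below
 * RPC_CWNDSCALE (multiplicative decrease).
 */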
269
270 /**
271 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
272 * @xprt: transport with waiting tasks
273 * @status: result code to plant in each task before waking it
274 *
275 */
276 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
277 {
278 if (status < 0)
279 rpc_wake_up_status(&xprt->pending, status);
280 else
281 rpc_wake_up(&xprt->pending);
282 }
283
284 /**
285 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
286 * @task: task to be put to sleep
287 *
288 */
289 void xprt_wait_for_buffer_space(struct rpc_task *task)
290 {
291 struct rpc_rqst *req = task->tk_rqstp;
292 struct rpc_xprt *xprt = req->rq_xprt;
293
294 task->tk_timeout = req->rq_timeout;
295 rpc_sleep_on(&xprt->pending, task, NULL, NULL);
296 }
297
298 /**
299 * xprt_write_space - wake the task waiting for transport output buffer space
300 * @xprt: transport with waiting tasks
301 *
302 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
303 */
304 void xprt_write_space(struct rpc_xprt *xprt)
305 {
306 if (unlikely(xprt->shutdown))
307 return;
308
309 spin_lock_bh(&xprt->transport_lock);
310 if (xprt->snd_task) {
311 dprintk("RPC: write space: waking waiting task on xprt %p\n",
312 xprt);
313 rpc_wake_up_task(xprt->snd_task);
314 }
315 spin_unlock_bh(&xprt->transport_lock);
316 }
317
318 /**
319 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
320 * @task: task whose timeout is to be set
321 *
322 * Set a request's retransmit timeout based on the transport's
323 * default timeout parameters. Used by transports that don't adjust
324 * the retransmit timeout based on round-trip time estimation.
325 */
326 void xprt_set_retrans_timeout_def(struct rpc_task *task)
327 {
328 task->tk_timeout = task->tk_rqstp->rq_timeout;
329 }
330
331 /**
332 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
333 * @task: task whose timeout is to be set
334 *
335 * Set a request's retransmit timeout using the RTT estimator.
336 */
337 void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
338 {
339 int timer = task->tk_msg.rpc_proc->p_timer;
340 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
341 struct rpc_rqst *req = task->tk_rqstp;
342 unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;
343
344 task->tk_timeout = rpc_calc_rto(rtt, timer);
345 task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
346 if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
347 task->tk_timeout = max_timeout;
348 }
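/*
 * Illustrative example (the 200ms figure is invented): if rpc_calc_rto()
 * estimates 200ms and rpc_ntimeo() plus rq_retries add up to a shift of 1,
 * the task waits 400ms before xprt_timer() fires; the result is always
 * clamped to the transport's to_maxval.
 */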
349
350 static void xprt_reset_majortimeo(struct rpc_rqst *req)
351 {
352 struct rpc_timeout *to = &req->rq_xprt->timeout;
353
354 req->rq_majortimeo = req->rq_timeout;
355 if (to->to_exponential)
356 req->rq_majortimeo <<= to->to_retries;
357 else
358 req->rq_majortimeo += to->to_increment * to->to_retries;
359 if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
360 req->rq_majortimeo = to->to_maxval;
361 req->rq_majortimeo += jiffies;
362 }
363
364 /**
365 * xprt_adjust_timeout - adjust timeout values for next retransmit
366 * @req: RPC request containing parameters to use for the adjustment
367 *
368 */
369 int xprt_adjust_timeout(struct rpc_rqst *req)
370 {
371 struct rpc_xprt *xprt = req->rq_xprt;
372 struct rpc_timeout *to = &xprt->timeout;
373 int status = 0;
374
375 if (time_before(jiffies, req->rq_majortimeo)) {
376 if (to->to_exponential)
377 req->rq_timeout <<= 1;
378 else
379 req->rq_timeout += to->to_increment;
380 if (to->to_maxval && req->rq_timeout >= to->to_maxval)
381 req->rq_timeout = to->to_maxval;
382 req->rq_retries++;
383 pprintk("RPC: %lu retrans\n", jiffies);
384 } else {
385 req->rq_timeout = to->to_initval;
386 req->rq_retries = 0;
387 xprt_reset_majortimeo(req);
388 /* Reset the RTT counters == "slow start" */
389 spin_lock_bh(&xprt->transport_lock);
390 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
391 spin_unlock_bh(&xprt->transport_lock);
392 pprintk("RPC: %lu timeout\n", jiffies);
393 status = -ETIMEDOUT;
394 }
395
396 if (req->rq_timeout == 0) {
397 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
398 req->rq_timeout = 5 * HZ;
399 }
400 return status;
401 }
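/*
 * Illustrative example (the values are invented, not defaults): with
 * to_exponential set, a 1 second rq_timeout doubles on every minor
 * timeout (1s, 2s, 4s, ...) up to to_maxval; with it clear, each retry
 * adds to_increment instead.  Only once rq_majortimeo has expired does
 * the caller see -ETIMEDOUT, and the per-request state is reset to
 * to_initval together with the RTT estimator ("slow start").
 */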
402
403 static void xprt_autoclose(void *args)
404 {
405 struct rpc_xprt *xprt = (struct rpc_xprt *)args;
406
407 xprt_disconnect(xprt);
408 xprt->ops->close(xprt);
409 xprt_release_write(xprt, NULL);
410 }
411
412 /**
413 * xprt_disconnect - mark a transport as disconnected
414 * @xprt: transport to flag for disconnect
415 *
416 */
417 void xprt_disconnect(struct rpc_xprt *xprt)
418 {
419 dprintk("RPC: disconnected transport %p\n", xprt);
420 spin_lock_bh(&xprt->transport_lock);
421 xprt_clear_connected(xprt);
422 xprt_wake_pending_tasks(xprt, -ENOTCONN);
423 spin_unlock_bh(&xprt->transport_lock);
424 }
425
426 static void
427 xprt_init_autodisconnect(unsigned long data)
428 {
429 struct rpc_xprt *xprt = (struct rpc_xprt *)data;
430
431 spin_lock(&xprt->transport_lock);
432 if (!list_empty(&xprt->recv) || xprt->shutdown)
433 goto out_abort;
434 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
435 goto out_abort;
436 spin_unlock(&xprt->transport_lock);
437 if (xprt_connecting(xprt))
438 xprt_release_write(xprt, NULL);
439 else
440 schedule_work(&xprt->task_cleanup);
441 return;
442 out_abort:
443 spin_unlock(&xprt->transport_lock);
444 }
445
446 /**
447 * xprt_connect - schedule a transport connect operation
448 * @task: RPC task that is requesting the connect
449 *
450 */
451 void xprt_connect(struct rpc_task *task)
452 {
453 struct rpc_xprt *xprt = task->tk_xprt;
454
455 dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
456 xprt, (xprt_connected(xprt) ? "is" : "is not"));
457
458 if (xprt->shutdown) {
459 task->tk_status = -EIO;
460 return;
461 }
462 if (!xprt->addr.sin_port) {
463 task->tk_status = -EIO;
464 return;
465 }
466 if (!xprt_lock_write(xprt, task))
467 return;
468 if (xprt_connected(xprt))
469 xprt_release_write(xprt, task);
470 else {
471 if (task->tk_rqstp)
472 task->tk_rqstp->rq_bytes_sent = 0;
473
474 task->tk_timeout = RPC_CONNECT_TIMEOUT;
475 rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
476 xprt->ops->connect(task);
477 }
478 return;
479 }
480
481 static void xprt_connect_status(struct rpc_task *task)
482 {
483 struct rpc_xprt *xprt = task->tk_xprt;
484
485 if (task->tk_status >= 0) {
486 dprintk("RPC: %4d xprt_connect_status: connection established\n",
487 task->tk_pid);
488 return;
489 }
490
491 switch (task->tk_status) {
492 case -ECONNREFUSED:
493 case -ECONNRESET:
494 dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
495 task->tk_pid, task->tk_client->cl_server);
496 break;
497 case -ENOTCONN:
498 dprintk("RPC: %4d xprt_connect_status: connection broken\n",
499 task->tk_pid);
500 break;
501 case -ETIMEDOUT:
502 dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
503 task->tk_pid);
504 break;
505 default:
506 dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
507 task->tk_pid, -task->tk_status, task->tk_client->cl_server);
508 xprt_release_write(xprt, task);
509 task->tk_status = -EIO;
510 return;
511 }
512
513 /* if soft mounted, just cause this RPC to fail */
514 if (RPC_IS_SOFT(task)) {
515 xprt_release_write(xprt, task);
516 task->tk_status = -EIO;
517 }
518 }
519
520 /**
521 * xprt_lookup_rqst - find an RPC request corresponding to an XID
522 * @xprt: transport on which the original request was transmitted
523 * @xid: RPC XID of incoming reply
524 *
525 */
526 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
527 {
528 struct list_head *pos;
529 struct rpc_rqst *req = NULL;
530
531 list_for_each(pos, &xprt->recv) {
532 struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
533 if (entry->rq_xid == xid) {
534 req = entry;
535 break;
536 }
537 }
538 return req;
539 }
540
541 /**
542 * xprt_complete_rqst - called when reply processing is complete
543 * @xprt: controlling transport
544 * @req: RPC request that just completed
545 * @copied: actual number of bytes received from the transport
546 *
547 */
548 void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
549 {
550 struct rpc_task *task = req->rq_task;
551 struct rpc_clnt *clnt = task->tk_client;
552
553 /* Adjust congestion window */
554 if (!xprt->nocong) {
555 unsigned timer = task->tk_msg.rpc_proc->p_timer;
556 xprt_adjust_cwnd(xprt, copied);
557 __xprt_put_cong(xprt, req);
558 if (timer) {
559 if (req->rq_ntrans == 1)
560 rpc_update_rtt(clnt->cl_rtt, timer,
561 (long)jiffies - req->rq_xtime);
562 rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1);
563 }
564 }
565
566 #ifdef RPC_PROFILE
567 /* Profile only reads for now */
568 if (copied > 1024) {
569 static unsigned long nextstat;
570 static unsigned long pkt_rtt, pkt_len, pkt_cnt;
571
572 pkt_cnt++;
573 pkt_len += req->rq_slen + copied;
574 pkt_rtt += jiffies - req->rq_xtime;
575 if (time_before(nextstat, jiffies)) {
576 printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd);
577 printk("RPC: %ld %ld %ld %ld stat\n",
578 jiffies, pkt_cnt, pkt_len, pkt_rtt);
579 pkt_rtt = pkt_len = pkt_cnt = 0;
580 nextstat = jiffies + 5 * HZ;
581 }
582 }
583 #endif
584
585 dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
586 list_del_init(&req->rq_list);
587 req->rq_received = req->rq_private_buf.len = copied;
588
589 /* ... and wake up the process. */
590 rpc_wake_up_task(task);
591 return;
592 }
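/*
 * Illustrative sketch, not part of the upstream file: a transport's
 * receive path (for instance a socket data_ready callback; the exact
 * caller is an assumption here) pairs xprt_lookup_rqst() with
 * xprt_complete_rqst() under the transport lock:
 *
 *	spin_lock(&xprt->transport_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req != NULL)
 *		xprt_complete_rqst(xprt, req, copied);
 *	spin_unlock(&xprt->transport_lock);
 */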
593
594 /*
595 * RPC receive timeout handler.
596 */
597 static void
598 xprt_timer(struct rpc_task *task)
599 {
600 struct rpc_rqst *req = task->tk_rqstp;
601 struct rpc_xprt *xprt = req->rq_xprt;
602
603 spin_lock(&xprt->transport_lock);
604 if (req->rq_received)
605 goto out;
606
607 xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
608 __xprt_put_cong(xprt, req);
609
610 dprintk("RPC: %4d xprt_timer (%s request)\n",
611 task->tk_pid, req ? "pending" : "backlogged");
612
613 task->tk_status = -ETIMEDOUT;
614 out:
615 task->tk_timeout = 0;
616 rpc_wake_up_task(task);
617 spin_unlock(&xprt->transport_lock);
618 }
619
620 /**
621 * xprt_prepare_transmit - reserve the transport before sending a request
622 * @task: RPC task about to send a request
623 *
624 */
625 int xprt_prepare_transmit(struct rpc_task *task)
626 {
627 struct rpc_rqst *req = task->tk_rqstp;
628 struct rpc_xprt *xprt = req->rq_xprt;
629 int err = 0;
630
631 dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);
632
633 if (xprt->shutdown)
634 return -EIO;
635
636 spin_lock_bh(&xprt->transport_lock);
637 if (req->rq_received && !req->rq_bytes_sent) {
638 err = req->rq_received;
639 goto out_unlock;
640 }
641 if (!xprt->ops->reserve_xprt(task)) {
642 err = -EAGAIN;
643 goto out_unlock;
644 }
645
646 if (!xprt_connected(xprt)) {
647 err = -ENOTCONN;
648 goto out_unlock;
649 }
650 out_unlock:
651 spin_unlock_bh(&xprt->transport_lock);
652 return err;
653 }
654
655 /**
656 * xprt_transmit - send an RPC request on a transport
657 * @task: controlling RPC task
658 *
659 * We have to copy the iovec because sendmsg fiddles with its contents.
660 */
661 void xprt_transmit(struct rpc_task *task)
662 {
663 struct rpc_rqst *req = task->tk_rqstp;
664 struct rpc_xprt *xprt = req->rq_xprt;
665 int status;
666
667 dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
668
669 smp_rmb();
670 if (!req->rq_received) {
671 if (list_empty(&req->rq_list)) {
672 spin_lock_bh(&xprt->transport_lock);
673 /* Update the softirq receive buffer */
674 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
675 sizeof(req->rq_private_buf));
676 /* Add request to the receive list */
677 list_add_tail(&req->rq_list, &xprt->recv);
678 spin_unlock_bh(&xprt->transport_lock);
679 xprt_reset_majortimeo(req);
680 /* Turn off autodisconnect */
681 del_singleshot_timer_sync(&xprt->timer);
682 }
683 } else if (!req->rq_bytes_sent)
684 return;
685
686 status = xprt->ops->send_request(task);
687 if (status == 0) {
688 dprintk("RPC: %4d xmit complete\n", task->tk_pid);
689 spin_lock_bh(&xprt->transport_lock);
690 xprt->ops->set_retrans_timeout(task);
691 /* Don't race with disconnect */
692 if (!xprt_connected(xprt))
693 task->tk_status = -ENOTCONN;
694 else if (!req->rq_received)
695 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
696 __xprt_release_write(xprt, task);
697 spin_unlock_bh(&xprt->transport_lock);
698 return;
699 }
700
701 /* Note: at this point, task->tk_sleeping has not yet been set,
702 * hence there is no danger of the waking up task being put on
703 * schedq, and being picked up by a parallel run of rpciod().
704 */
705 task->tk_status = status;
706
707 switch (status) {
708 case -ECONNREFUSED:
709 task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
710 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
711 case -EAGAIN:
712 case -ENOTCONN:
713 return;
714 default:
715 break;
716 }
717 xprt_release_write(xprt, task);
718 return;
719 }
720
721 static inline void do_xprt_reserve(struct rpc_task *task)
722 {
723 struct rpc_xprt *xprt = task->tk_xprt;
724
725 task->tk_status = 0;
726 if (task->tk_rqstp)
727 return;
728 if (!list_empty(&xprt->free)) {
729 struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
730 list_del_init(&req->rq_list);
731 task->tk_rqstp = req;
732 xprt_request_init(task, xprt);
733 return;
734 }
735 dprintk("RPC: waiting for request slot\n");
736 task->tk_status = -EAGAIN;
737 task->tk_timeout = 0;
738 rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
739 }
740
741 /**
742 * xprt_reserve - allocate an RPC request slot
743 * @task: RPC task requesting a slot allocation
744 *
745 * If no more slots are available, place the task on the transport's
746 * backlog queue.
747 */
748 void xprt_reserve(struct rpc_task *task)
749 {
750 struct rpc_xprt *xprt = task->tk_xprt;
751
752 task->tk_status = -EIO;
753 if (!xprt->shutdown) {
754 spin_lock(&xprt->reserve_lock);
755 do_xprt_reserve(task);
756 spin_unlock(&xprt->reserve_lock);
757 }
758 }
759
760 static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
761 {
762 return xprt->xid++;
763 }
764
765 static inline void xprt_init_xid(struct rpc_xprt *xprt)
766 {
767 get_random_bytes(&xprt->xid, sizeof(xprt->xid));
768 }
769
770 static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
771 {
772 struct rpc_rqst *req = task->tk_rqstp;
773
774 req->rq_timeout = xprt->timeout.to_initval;
775 req->rq_task = task;
776 req->rq_xprt = xprt;
777 req->rq_xid = xprt_alloc_xid(xprt);
778 dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
779 req, ntohl(req->rq_xid));
780 }
781
782 /**
783 * xprt_release - release an RPC request slot
784 * @task: task which is finished with the slot
785 *
786 */
787 void xprt_release(struct rpc_task *task)
788 {
789 struct rpc_xprt *xprt = task->tk_xprt;
790 struct rpc_rqst *req;
791
792 if (!(req = task->tk_rqstp))
793 return;
794 spin_lock_bh(&xprt->transport_lock);
795 __xprt_release_write(xprt, task);
796 __xprt_put_cong(xprt, req);
797 if (!list_empty(&req->rq_list))
798 list_del(&req->rq_list);
799 xprt->last_used = jiffies;
800 if (list_empty(&xprt->recv) && !xprt->shutdown)
801 mod_timer(&xprt->timer,
802 xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT);
803 spin_unlock_bh(&xprt->transport_lock);
804 task->tk_rqstp = NULL;
805 memset(req, 0, sizeof(*req)); /* mark unused */
806
807 dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
808
809 spin_lock(&xprt->reserve_lock);
810 list_add(&req->rq_list, &xprt->free);
811 xprt_clear_backlog(xprt);
812 spin_unlock(&xprt->reserve_lock);
813 }
814
815 /**
816 * xprt_set_timeout - set constant RPC timeout
817 * @to: RPC timeout parameters to set up
818 * @retr: number of retries
819 * @incr: amount of increase after each retry
820 *
821 */
822 void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
823 {
824 to->to_initval =
825 to->to_increment = incr;
826 to->to_maxval = to->to_initval + (incr * retr);
827 to->to_retries = retr;
828 to->to_exponential = 0;
829 }
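/*
 * Worked example (values chosen for illustration only):
 * xprt_set_timeout(to, 5, 10 * HZ) produces to_initval = to_increment =
 * 10 seconds, to_retries = 5 and to_maxval = 10s + 5 * 10s = 60 seconds,
 * i.e. a linear, non-exponential backoff schedule.
 */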
830
831 static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
832 {
833 int result;
834 struct rpc_xprt *xprt;
835 struct rpc_rqst *req;
836
837 if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
838 return ERR_PTR(-ENOMEM);
839 memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
840
841 xprt->addr = *ap;
842
843 switch (proto) {
844 case IPPROTO_UDP:
845 result = xs_setup_udp(xprt, to);
846 break;
847 case IPPROTO_TCP:
848 result = xs_setup_tcp(xprt, to);
849 break;
850 default:
851 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
852 proto);
853 result = -EIO;
854 break;
855 }
856 if (result) {
857 kfree(xprt);
858 return ERR_PTR(result);
859 }
860
861 spin_lock_init(&xprt->transport_lock);
862 spin_lock_init(&xprt->reserve_lock);
863 init_waitqueue_head(&xprt->cong_wait);
864
865 INIT_LIST_HEAD(&xprt->free);
866 INIT_LIST_HEAD(&xprt->recv);
867 INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
868 init_timer(&xprt->timer);
869 xprt->timer.function = xprt_init_autodisconnect;
870 xprt->timer.data = (unsigned long) xprt;
871 xprt->last_used = jiffies;
872
873 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
874 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
875 rpc_init_wait_queue(&xprt->resend, "xprt_resend");
876 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
877
878 /* initialize free list */
879 for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
880 list_add(&req->rq_list, &xprt->free);
881
882 xprt_init_xid(xprt);
883
884 dprintk("RPC: created transport %p with %u slots\n", xprt,
885 xprt->max_reqs);
886
887 return xprt;
888 }
889
890 /**
891 * xprt_create_proto - create an RPC client transport
892 * @proto: requested transport protocol
893 * @sap: remote peer's address
894 * @to: timeout parameters for new transport
895 *
896 */
897 struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
898 {
899 struct rpc_xprt *xprt;
900
901 xprt = xprt_setup(proto, sap, to);
902 if (IS_ERR(xprt))
903 dprintk("RPC: xprt_create_proto failed\n");
904 else
905 dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);
906 return xprt;
907 }
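/*
 * Illustrative sketch, not part of the upstream file (the variable names
 * are made up): a client typically sets up its timeout parameters and
 * then asks for a transport bound to the server's address:
 *
 *	struct rpc_timeout timeo;
 *	struct rpc_xprt *xprt;
 *
 *	xprt_set_timeout(&timeo, 5, 10 * HZ);
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, &timeo);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 */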
908
909 static void xprt_shutdown(struct rpc_xprt *xprt)
910 {
911 xprt->shutdown = 1;
912 rpc_wake_up(&xprt->sending);
913 rpc_wake_up(&xprt->resend);
914 xprt_wake_pending_tasks(xprt, -EIO);
915 rpc_wake_up(&xprt->backlog);
916 wake_up(&xprt->cong_wait);
917 del_timer_sync(&xprt->timer);
918 }
919
920 static int xprt_clear_backlog(struct rpc_xprt *xprt) {
921 rpc_wake_up_next(&xprt->backlog);
922 wake_up(&xprt->cong_wait);
923 return 1;
924 }
925
926 /**
927 * xprt_destroy - destroy an RPC transport, killing off all requests.
928 * @xprt: transport to destroy
929 *
930 */
931 int xprt_destroy(struct rpc_xprt *xprt)
932 {
933 dprintk("RPC: destroying transport %p\n", xprt);
934 xprt_shutdown(xprt);
935 xprt->ops->destroy(xprt);
936 kfree(xprt);
937
938 return 0;
939 }