// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
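
/*
 * Usage sketch (illustrative, not part of this file): a transport
 * module typically fills in a struct xprt_class and registers it from
 * its module init hook. All names and field values below are
 * hypothetical.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,	// hypothetical
 *		.setup	= xs_setup_example,		// hypothetical
 *	};
 *
 *	static int __init example_xprt_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 */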
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
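
/*
 * Note: because the fallback is request_module("xprt%s", ...), a
 * transport module must advertise a matching "xprt<name>" module alias
 * to be demand-loadable (the RDMA transport does this with
 * MODULE_ALIAS("xprtrdma")). A caller sketch (hypothetical):
 *
 *	if (xprt_load_transport("rdma") != 0)
 *		return -EPROTONOSUPPORT;	// fall back or fail
 */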
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
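
/*
 * These reserve/release routines are not usually called directly; a
 * transport wires them up through its rpc_xprt_ops. A sketch modelled
 * on the socket transports (field list abridged):
 *
 *	static const struct rpc_xprt_ops xs_example_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt_cong,
 *		.release_xprt	= xprt_release_xprt_cong,
 *		...
 *	};
 *
 * Datagram (UDP-style) transports pick the _cong variants; stream
 * transports use xprt_reserve_xprt/xprt_release_xprt instead.
 */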
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
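
/*
 * Worked example (assuming RPC_CWNDSCALE is 256, per sunrpc/xprt.h):
 * with cwnd = 512 (two requests' worth of window), a successful reply
 * grows the window by
 *
 *	(256 * 256 + (512 >> 1)) / 512 = (65536 + 256) / 512 = 128
 *
 * i.e. half a request's worth, so cwnd becomes 640. A timeout instead
 * halves cwnd to 256, clamped below at RPC_CWNDSCALE (one request).
 */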
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
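
/*
 * Worked example (hypothetical values): with to_initval = 1s,
 * to_increment = 1s, to_retries = 3 and to_exponential = 0, the major
 * timeout falls 1 + 1 * 3 = 4 seconds after the timer base; with
 * to_exponential set it would instead be 1 << 3 = 8 seconds. Either
 * result is clamped to to_maxval.
 */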
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}
static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}
bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}
/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
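
/*
 * Example of the resulting schedule (hypothetical values): with
 * init_to = 3s and max_reconnect_timeout = 30s, successive backoff
 * calls move reestablish_timeout through 6s, 12s, 24s, then pin it
 * at 30s; a value that has decayed below init_to is raised back to 3s.
 */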
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
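
/*
 * Typical receive-path pattern (sketch modelled on the socket
 * transports; error handling elided, copy_reply_data() is
 * hypothetical):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req)
 *		goto out_unlock;
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *
 *	copied = copy_reply_data(req);	// runs without queue_lock
 *
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 * out_unlock:
 *	spin_unlock(&xprt->queue_lock);
 *
 * The pin keeps the request from being released while the data copy
 * runs outside the lock.
 */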
static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}
/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}
/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
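
/*
 * Worked example (hypothetical numbers): if rpc_calc_rto() returns a
 * smoothed RTO of 2 seconds, one earlier major timeout was recorded
 * for this timer class (rpc_ntimeo() == 1) and this is the request's
 * first retry (rq_retries == 1), the task sleeps for
 * 2s << (1 + 1) = 8 seconds, subject to the to_maxval clamp above.
 */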
/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}
/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}
/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}
/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (task->tk_ops->rpc_call_prepare_transmit) {
			task->tk_ops->rpc_call_prepare_transmit(task,
					task->tk_calldata);
			status = task->tk_status;
			if (status < 0)
				goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
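
/*
 * Usage sketch (modelled on the socket transports' setup routines;
 * names are illustrative): a transport's ->setup callback typically
 * embeds struct rpc_xprt in its own state and sizes the slot table
 * here, with num_prealloc/max_alloc taken from module parameters.
 *
 *	struct example_xprt {			// hypothetical container
 *		struct rpc_xprt	xprt;
 *		...
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  slot_table_entries, max_slot_table_entries);
 *	if (!xprt)
 *		return ERR_PTR(-ENOMEM);
 */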
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt	= xprt;
	req->rq_buffer	= NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

static void
xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_all(task, req);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}
static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);