// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
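/*
 * Illustrative walk-through (not part of the original file): the life
 * cycle above, as a hypothetical synchronous caller would drive it.
 * Only the helper names are real entry points in this file; the control
 * flow is simplified and error handling is omitted.
 *
 *	xprt_reserve(task);	// allocate a slot, or sleep on ->backlog
 *	// ... marshal the RPC message into req->rq_snd_buf ...
 *	xprt_connect(task);	// ensure the transport is connected
 *	xprt_transmit(task);	// queue and send, arm the reply timeout
 *	// the task sleeps on ->pending until xprt_complete_rqst()
 *	// or xprt_timer() wakes it
 *	xprt_release(task);	// return the slot, wake backlogged tasks
 */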
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}
static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
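/*
 * Worked example (illustrative, with made-up values in seconds rather
 * than jiffies): for a non-exponential policy with rq_timeout = 5,
 * to_increment = 5, to_retries = 3 and to_maxval = 30, the major
 * timeout is 5 + 5 * 3 = 20; an exponential policy would instead shift,
 * 5 << 3 = 40, which is then clamped to to_maxval = 30.
 */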
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
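/*
 * Worked example (illustrative): under an exponential policy with
 * to_initval = 5s and to_maxval = 30s, successive minor timeouts of one
 * request double rq_timeout 5s -> 10s -> 20s -> 30s (clamped); once
 * rq_majortimeo expires, the cycle restarts at to_initval, the RTT
 * estimator is reset, and the caller sees -ETIMEDOUT (a major timeout).
 */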
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}
static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}
/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}
/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}
static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					break;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}
/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}
/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}
/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int counter, status;

	spin_lock(&xprt->queue_lock);
	counter = 0;
	while (!list_empty(&xprt->xmit_queue)) {
		if (++counter == 20)
			break;
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	xprt->num_reqs++;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	xprt->num_reqs--;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
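/*
 * Example usage (illustrative sketch, not part of the original file):
 * a transport setup routine embeds struct rpc_xprt in its own private
 * structure and sizes the slot table up front; "example_xprt" is a
 * made-up container type:
 *
 *	struct example_xprt {
 *		struct rpc_xprt xprt;
 *		// transport-private state ...
 *	};
 *
 *	struct rpc_xprt *xprt = xprt_alloc(args->net,
 *				sizeof(struct example_xprt),
 *				RPC_DEF_SLOT_TABLE,	// preallocated slots
 *				RPC_MAX_SLOT_TABLE);	// dynamic growth cap
 *	if (!xprt)
 *		return ERR_PTR(-ENOMEM);
 */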
);
1761 void xprt_free(struct rpc_xprt
*xprt
)
1763 put_net(xprt
->xprt_net
);
1764 xprt_free_all_slots(xprt
);
1765 kfree_rcu(xprt
, rcu
);
1767 EXPORT_SYMBOL_GPL(xprt_free
);
1770 xprt_init_connect_cookie(struct rpc_rqst
*req
, struct rpc_xprt
*xprt
)
1772 req
->rq_connect_cookie
= xprt_connect_cookie(xprt
) - 1;
1776 xprt_alloc_xid(struct rpc_xprt
*xprt
)
1780 spin_lock(&xprt
->reserve_lock
);
1781 xid
= (__force __be32
)xprt
->xid
++;
1782 spin_unlock(&xprt
->reserve_lock
);
1787 xprt_init_xid(struct rpc_xprt
*xprt
)
1789 xprt
->xid
= prandom_u32();
static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}
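/*
 * Example usage (illustrative sketch, not part of the original file):
 * rpc_create() builds the creation arguments and lets this routine pick
 * the transport class by ident.  The fields shown are real members of
 * struct xprt_create; "sap"/"salen" stand in for a caller's resolved
 * server address:
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.srcaddr	= NULL,
 *		.dstaddr	= sap,
 *		.addrlen	= salen,
 *		.servername	= "server.example.com",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 */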
static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);