/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
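/*
 * Orientation note (added; derived from the tk_action assignments below):
 * the usual state sequence for a single RPC is roughly
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind [-> call_bind_status] -> call_connect
 *	  [-> call_connect_status] -> call_transmit (which runs call_encode)
 *	  -> call_status -> call_decode -> rpc_exit_task
 *
 * Error and retry paths loop back to call_bind, call_reserve or
 * call_refresh as handled in the individual states.
 */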
#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			rpc_put_mount();
			return error;
		}
	}
}
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
static struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap	  = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_pathname);
		dput(clnt->cl_dentry);
		rpc_put_mount();
	}
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
out_no_xprt:
	return ERR_PTR(err);
}
/**
 * rpc_create_client - Create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	if (!IS_ERR(new->cl_dentry)) {
		dget(new->cl_dentry);
		rpc_get_mount();
	}
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	new->cl_metrics = rpc_alloc_iostats(clnt);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
	}

	return rpc_destroy_client(clnt);
}
/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	if (!IS_ERR(clnt->cl_dentry)) {
		dput(clnt->cl_dentry);
		rpc_put_mount();
	}
	kfree(clnt);
	return 0;
}
/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
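/*
 * Illustrative usage sketch (not part of this file): a caller that already
 * holds an rpc_clnt for one program could bind a sideband program onto the
 * same transport, as the NFS client does for the ACL protocol.  The
 * "nfs_clnt", "nfsacl_program" and version number here are placeholders for
 * whatever rpc_program the caller owns.
 */
#if 0
	struct rpc_clnt *acl_clnt;

	acl_clnt = rpc_bind_new_program(nfs_clnt, &nfsacl_program, 3);
	if (IS_ERR(acl_clnt))
		return PTR_ERR(acl_clnt);
#endif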
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}
/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}
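/*
 * Illustrative usage sketch (not part of this file): a typical synchronous
 * caller fills in a struct rpc_message and hands it to rpc_call_sync() on an
 * existing client.  The procedure table entry and argument/result types
 * below are hypothetical placeholders.
 */
#if 0
static int example_sync_call(struct rpc_clnt *clnt,
			     struct example_args *args,
			     struct example_res *res)
{
	struct rpc_message msg = {
		.rpc_proc	= &example_procedures[EXAMPLEPROC_DOIT],
		.rpc_argp	= args,
		.rpc_resp	= res,
	};

	/* Blocks until the reply has been decoded or the call fails. */
	return rpc_call_sync(clnt, &msg, 0);
}
#endif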
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	status = -EIO;
	if (clnt->cl_dead)
		goto out_release;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out_release;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
	return status;
out_release:
	if (tk_ops->rpc_release != NULL)
		tk_ops->rpc_release(data);
	return status;
}
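/*
 * Illustrative usage sketch (not part of this file): an asynchronous caller
 * supplies a struct rpc_call_ops whose rpc_call_done callback runs once the
 * reply has been handled and whose rpc_release callback frees the calldata.
 * All names below are hypothetical placeholders.
 */
#if 0
static void example_call_done(struct rpc_task *task, void *calldata)
{
	struct example_calldata *d = calldata;

	d->status = task->tk_status;	/* final result of the RPC */
}

static void example_call_release(void *calldata)
{
	kfree(calldata);
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= example_call_done,
	.rpc_release	= example_call_release,
};

/* ...then: rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &example_call_ops, d); */
#endif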
void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg   = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);
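/*
 * Illustrative usage sketch (not part of this file): an upper layer such as
 * an NFS mount might clamp its preferred transfer sizes to what the
 * transport can carry.  The wsize/rsize variables are hypothetical.
 */
#if 0
	wsize = min_t(unsigned int, wsize, rpc_max_payload(clnt));
	rsize = min_t(unsigned int, rsize, rpc_max_payload(clnt));
#endif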
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
}
EXPORT_SYMBOL(rpc_force_rebind);
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}
/*
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}
/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}
/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}
/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}
/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
	}
}
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			return;
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (task->tk_msg.rpc_proc->p_decode != NULL)
		return;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_task(task);
}
/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;
	/*
	 * Special case: if we've been waiting on the socket's write_space()
	 * callback, then don't call xprt_end_transmit().
	 */
	if (task->tk_status == -EAGAIN)
		return;
	xprt_end_transmit(task);
	rpc_task_force_reencode(task);
}
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_received.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}
/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}
/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
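/*
 * Illustrative only (not part of this file): the fixed part of the call
 * header written above, expressed as a struct of big-endian words for
 * orientation.  The real code writes the words directly, followed by the
 * variable-length credential and verifier marshalled by rpcauth_marshcred();
 * the authoritative layout is the ONC RPC call message in RFC 1831.
 */
#if 0
struct rpc_call_header_fixed {
	__be32	xid;		/* req->rq_xid */
	__be32	msg_type;	/* RPC_CALL */
	__be32	rpc_version;	/* RPC_VERSION (2) */
	__be32	prog;		/* clnt->cl_prog */
	__be32	vers;		/* clnt->cl_vers */
	__be32	proc;		/* rpc_proc->p_proc */
};
#endif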
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}
static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}
);