]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - fs/nfs/nfs4proc.c
Merge branch 'work.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
[mirror_ubuntu-jammy-kernel.git] / fs / nfs / nfs4proc.c
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 /* file attributes which can be mapped to nfs attributes */
78 #define NFS4_VALID_ATTRS (ATTR_MODE \
79 | ATTR_UID \
80 | ATTR_GID \
81 | ATTR_SIZE \
82 | ATTR_ATIME \
83 | ATTR_MTIME \
84 | ATTR_CTIME \
85 | ATTR_ATIME_SET \
86 | ATTR_MTIME_SET)
87
88 struct nfs4_opendata;
89 static int _nfs4_proc_open(struct nfs4_opendata *data);
90 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
91 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
92 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
93 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
94 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
95 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
96 struct nfs_fattr *fattr, struct iattr *sattr,
97 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
98 struct nfs4_label *olabel);
99 #ifdef CONFIG_NFS_V4_1
100 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
101 struct rpc_cred *cred,
102 struct nfs4_slot *slot,
103 bool is_privileged);
104 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
105 struct rpc_cred *);
106 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
107 struct rpc_cred *, bool);
108 #endif
109
110 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
111 static inline struct nfs4_label *
112 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
113 struct iattr *sattr, struct nfs4_label *label)
114 {
115 int err;
116
117 if (label == NULL)
118 return NULL;
119
120 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
121 return NULL;
122
123 err = security_dentry_init_security(dentry, sattr->ia_mode,
124 &dentry->d_name, (void **)&label->label, &label->len);
125 if (err == 0)
126 return label;
127
128 return NULL;
129 }
130 static inline void
131 nfs4_label_release_security(struct nfs4_label *label)
132 {
133 if (label)
134 security_release_secctx(label->label, label->len);
135 }
136 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
137 {
138 if (label)
139 return server->attr_bitmask;
140
141 return server->attr_bitmask_nl;
142 }
143 #else
144 static inline struct nfs4_label *
145 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
146 struct iattr *sattr, struct nfs4_label *l)
147 { return NULL; }
148 static inline void
149 nfs4_label_release_security(struct nfs4_label *label)
150 { return; }
151 static inline u32 *
152 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
153 { return server->attr_bitmask; }
154 #endif
155
156 /* Prevent leaks of NFSv4 errors into userland */
157 static int nfs4_map_errors(int err)
158 {
159 if (err >= -1000)
160 return err;
161 switch (err) {
162 case -NFS4ERR_RESOURCE:
163 case -NFS4ERR_LAYOUTTRYLATER:
164 case -NFS4ERR_RECALLCONFLICT:
165 return -EREMOTEIO;
166 case -NFS4ERR_WRONGSEC:
167 case -NFS4ERR_WRONG_CRED:
168 return -EPERM;
169 case -NFS4ERR_BADOWNER:
170 case -NFS4ERR_BADNAME:
171 return -EINVAL;
172 case -NFS4ERR_SHARE_DENIED:
173 return -EACCES;
174 case -NFS4ERR_MINOR_VERS_MISMATCH:
175 return -EPROTONOSUPPORT;
176 case -NFS4ERR_FILE_OPEN:
177 return -EBUSY;
178 default:
179 dprintk("%s could not handle NFSv4 error %d\n",
180 __func__, -err);
181 break;
182 }
183 return -EIO;
184 }
185
186 /*
187 * This is our standard bitmap for GETATTR requests.
188 */
189 const u32 nfs4_fattr_bitmap[3] = {
190 FATTR4_WORD0_TYPE
191 | FATTR4_WORD0_CHANGE
192 | FATTR4_WORD0_SIZE
193 | FATTR4_WORD0_FSID
194 | FATTR4_WORD0_FILEID,
195 FATTR4_WORD1_MODE
196 | FATTR4_WORD1_NUMLINKS
197 | FATTR4_WORD1_OWNER
198 | FATTR4_WORD1_OWNER_GROUP
199 | FATTR4_WORD1_RAWDEV
200 | FATTR4_WORD1_SPACE_USED
201 | FATTR4_WORD1_TIME_ACCESS
202 | FATTR4_WORD1_TIME_METADATA
203 | FATTR4_WORD1_TIME_MODIFY
204 | FATTR4_WORD1_MOUNTED_ON_FILEID,
205 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
206 FATTR4_WORD2_SECURITY_LABEL
207 #endif
208 };
209
210 static const u32 nfs4_pnfs_open_bitmap[3] = {
211 FATTR4_WORD0_TYPE
212 | FATTR4_WORD0_CHANGE
213 | FATTR4_WORD0_SIZE
214 | FATTR4_WORD0_FSID
215 | FATTR4_WORD0_FILEID,
216 FATTR4_WORD1_MODE
217 | FATTR4_WORD1_NUMLINKS
218 | FATTR4_WORD1_OWNER
219 | FATTR4_WORD1_OWNER_GROUP
220 | FATTR4_WORD1_RAWDEV
221 | FATTR4_WORD1_SPACE_USED
222 | FATTR4_WORD1_TIME_ACCESS
223 | FATTR4_WORD1_TIME_METADATA
224 | FATTR4_WORD1_TIME_MODIFY,
225 FATTR4_WORD2_MDSTHRESHOLD
226 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
227 | FATTR4_WORD2_SECURITY_LABEL
228 #endif
229 };
230
231 static const u32 nfs4_open_noattr_bitmap[3] = {
232 FATTR4_WORD0_TYPE
233 | FATTR4_WORD0_FILEID,
234 };
235
236 const u32 nfs4_statfs_bitmap[3] = {
237 FATTR4_WORD0_FILES_AVAIL
238 | FATTR4_WORD0_FILES_FREE
239 | FATTR4_WORD0_FILES_TOTAL,
240 FATTR4_WORD1_SPACE_AVAIL
241 | FATTR4_WORD1_SPACE_FREE
242 | FATTR4_WORD1_SPACE_TOTAL
243 };
244
245 const u32 nfs4_pathconf_bitmap[3] = {
246 FATTR4_WORD0_MAXLINK
247 | FATTR4_WORD0_MAXNAME,
248 0
249 };
250
251 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
252 | FATTR4_WORD0_MAXREAD
253 | FATTR4_WORD0_MAXWRITE
254 | FATTR4_WORD0_LEASE_TIME,
255 FATTR4_WORD1_TIME_DELTA
256 | FATTR4_WORD1_FS_LAYOUT_TYPES,
257 FATTR4_WORD2_LAYOUT_BLKSIZE
258 | FATTR4_WORD2_CLONE_BLKSIZE
259 };
260
261 const u32 nfs4_fs_locations_bitmap[3] = {
262 FATTR4_WORD0_CHANGE
263 | FATTR4_WORD0_SIZE
264 | FATTR4_WORD0_FSID
265 | FATTR4_WORD0_FILEID
266 | FATTR4_WORD0_FS_LOCATIONS,
267 FATTR4_WORD1_OWNER
268 | FATTR4_WORD1_OWNER_GROUP
269 | FATTR4_WORD1_RAWDEV
270 | FATTR4_WORD1_SPACE_USED
271 | FATTR4_WORD1_TIME_ACCESS
272 | FATTR4_WORD1_TIME_METADATA
273 | FATTR4_WORD1_TIME_MODIFY
274 | FATTR4_WORD1_MOUNTED_ON_FILEID,
275 };
276
277 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
278 struct nfs4_readdir_arg *readdir)
279 {
280 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
281 __be32 *start, *p;
282
283 if (cookie > 2) {
284 readdir->cookie = cookie;
285 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
286 return;
287 }
288
289 readdir->cookie = 0;
290 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
291 if (cookie == 2)
292 return;
293
294 /*
295 * NFSv4 servers do not return entries for '.' and '..'
296 * Therefore, we fake these entries here. We let '.'
297 * have cookie 0 and '..' have cookie 1. Note that
298 * when talking to the server, we always send cookie 0
299 * instead of 1 or 2.
300 */
301 start = p = kmap_atomic(*readdir->pages);
302
303 if (cookie == 0) {
304 *p++ = xdr_one; /* next */
305 *p++ = xdr_zero; /* cookie, first word */
306 *p++ = xdr_one; /* cookie, second word */
307 *p++ = xdr_one; /* entry len */
308 memcpy(p, ".\0\0\0", 4); /* entry */
309 p++;
310 *p++ = xdr_one; /* bitmap length */
311 *p++ = htonl(attrs); /* bitmap */
312 *p++ = htonl(12); /* attribute buffer length */
313 *p++ = htonl(NF4DIR);
314 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
315 }
316
317 *p++ = xdr_one; /* next */
318 *p++ = xdr_zero; /* cookie, first word */
319 *p++ = xdr_two; /* cookie, second word */
320 *p++ = xdr_two; /* entry len */
321 memcpy(p, "..\0\0", 4); /* entry */
322 p++;
323 *p++ = xdr_one; /* bitmap length */
324 *p++ = htonl(attrs); /* bitmap */
325 *p++ = htonl(12); /* attribute buffer length */
326 *p++ = htonl(NF4DIR);
327 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
328
329 readdir->pgbase = (char *)p - (char *)start;
330 readdir->count -= readdir->pgbase;
331 kunmap_atomic(start);
332 }
333
334 static void nfs4_test_and_free_stateid(struct nfs_server *server,
335 nfs4_stateid *stateid,
336 struct rpc_cred *cred)
337 {
338 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
339
340 ops->test_and_free_expired(server, stateid, cred);
341 }
342
343 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
344 nfs4_stateid *stateid,
345 struct rpc_cred *cred)
346 {
347 stateid->type = NFS4_REVOKED_STATEID_TYPE;
348 nfs4_test_and_free_stateid(server, stateid, cred);
349 }
350
351 static void nfs4_free_revoked_stateid(struct nfs_server *server,
352 const nfs4_stateid *stateid,
353 struct rpc_cred *cred)
354 {
355 nfs4_stateid tmp;
356
357 nfs4_stateid_copy(&tmp, stateid);
358 __nfs4_free_revoked_stateid(server, &tmp, cred);
359 }
360
361 static long nfs4_update_delay(long *timeout)
362 {
363 long ret;
364 if (!timeout)
365 return NFS4_POLL_RETRY_MAX;
366 if (*timeout <= 0)
367 *timeout = NFS4_POLL_RETRY_MIN;
368 if (*timeout > NFS4_POLL_RETRY_MAX)
369 *timeout = NFS4_POLL_RETRY_MAX;
370 ret = *timeout;
371 *timeout <<= 1;
372 return ret;
373 }
374
375 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
376 {
377 int res = 0;
378
379 might_sleep();
380
381 freezable_schedule_timeout_killable_unsafe(
382 nfs4_update_delay(timeout));
383 if (fatal_signal_pending(current))
384 res = -ERESTARTSYS;
385 return res;
386 }
387
388 /* This is the error handling routine for processes that are allowed
389 * to sleep.
390 */
391 static int nfs4_do_handle_exception(struct nfs_server *server,
392 int errorcode, struct nfs4_exception *exception)
393 {
394 struct nfs_client *clp = server->nfs_client;
395 struct nfs4_state *state = exception->state;
396 const nfs4_stateid *stateid = exception->stateid;
397 struct inode *inode = exception->inode;
398 int ret = errorcode;
399
400 exception->delay = 0;
401 exception->recovering = 0;
402 exception->retry = 0;
403
404 if (stateid == NULL && state != NULL)
405 stateid = &state->stateid;
406
407 switch(errorcode) {
408 case 0:
409 return 0;
410 case -NFS4ERR_DELEG_REVOKED:
411 case -NFS4ERR_ADMIN_REVOKED:
412 case -NFS4ERR_EXPIRED:
413 case -NFS4ERR_BAD_STATEID:
414 if (inode != NULL && stateid != NULL) {
415 nfs_inode_find_state_and_recover(inode,
416 stateid);
417 goto wait_on_recovery;
418 }
419 case -NFS4ERR_OPENMODE:
420 if (inode) {
421 int err;
422
423 err = nfs_async_inode_return_delegation(inode,
424 stateid);
425 if (err == 0)
426 goto wait_on_recovery;
427 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
428 exception->retry = 1;
429 break;
430 }
431 }
432 if (state == NULL)
433 break;
434 ret = nfs4_schedule_stateid_recovery(server, state);
435 if (ret < 0)
436 break;
437 goto wait_on_recovery;
438 case -NFS4ERR_STALE_STATEID:
439 case -NFS4ERR_STALE_CLIENTID:
440 nfs4_schedule_lease_recovery(clp);
441 goto wait_on_recovery;
442 case -NFS4ERR_MOVED:
443 ret = nfs4_schedule_migration_recovery(server);
444 if (ret < 0)
445 break;
446 goto wait_on_recovery;
447 case -NFS4ERR_LEASE_MOVED:
448 nfs4_schedule_lease_moved_recovery(clp);
449 goto wait_on_recovery;
450 #if defined(CONFIG_NFS_V4_1)
451 case -NFS4ERR_BADSESSION:
452 case -NFS4ERR_BADSLOT:
453 case -NFS4ERR_BAD_HIGH_SLOT:
454 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
455 case -NFS4ERR_DEADSESSION:
456 case -NFS4ERR_SEQ_FALSE_RETRY:
457 case -NFS4ERR_SEQ_MISORDERED:
458 dprintk("%s ERROR: %d Reset session\n", __func__,
459 errorcode);
460 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
461 goto wait_on_recovery;
462 #endif /* defined(CONFIG_NFS_V4_1) */
463 case -NFS4ERR_FILE_OPEN:
464 if (exception->timeout > HZ) {
465 /* We have retried a decent amount, time to
466 * fail
467 */
468 ret = -EBUSY;
469 break;
470 }
471 case -NFS4ERR_DELAY:
472 nfs_inc_server_stats(server, NFSIOS_DELAY);
473 case -NFS4ERR_GRACE:
474 case -NFS4ERR_LAYOUTTRYLATER:
475 case -NFS4ERR_RECALLCONFLICT:
476 exception->delay = 1;
477 return 0;
478
479 case -NFS4ERR_RETRY_UNCACHED_REP:
480 case -NFS4ERR_OLD_STATEID:
481 exception->retry = 1;
482 break;
483 case -NFS4ERR_BADOWNER:
484 /* The following works around a Linux server bug! */
485 case -NFS4ERR_BADNAME:
486 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
487 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
488 exception->retry = 1;
489 printk(KERN_WARNING "NFS: v4 server %s "
490 "does not accept raw "
491 "uid/gids. "
492 "Reenabling the idmapper.\n",
493 server->nfs_client->cl_hostname);
494 }
495 }
496 /* We failed to handle the error */
497 return nfs4_map_errors(ret);
498 wait_on_recovery:
499 exception->recovering = 1;
500 return 0;
501 }
502
503 /* This is the error handling routine for processes that are allowed
504 * to sleep.
505 */
506 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
507 {
508 struct nfs_client *clp = server->nfs_client;
509 int ret;
510
511 ret = nfs4_do_handle_exception(server, errorcode, exception);
512 if (exception->delay) {
513 ret = nfs4_delay(server->client, &exception->timeout);
514 goto out_retry;
515 }
516 if (exception->recovering) {
517 ret = nfs4_wait_clnt_recover(clp);
518 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
519 return -EIO;
520 goto out_retry;
521 }
522 return ret;
523 out_retry:
524 if (ret == 0)
525 exception->retry = 1;
526 return ret;
527 }
528
529 static int
530 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
531 int errorcode, struct nfs4_exception *exception)
532 {
533 struct nfs_client *clp = server->nfs_client;
534 int ret;
535
536 ret = nfs4_do_handle_exception(server, errorcode, exception);
537 if (exception->delay) {
538 rpc_delay(task, nfs4_update_delay(&exception->timeout));
539 goto out_retry;
540 }
541 if (exception->recovering) {
542 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
543 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
544 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
545 goto out_retry;
546 }
547 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
548 ret = -EIO;
549 return ret;
550 out_retry:
551 if (ret == 0)
552 exception->retry = 1;
553 return ret;
554 }
555
556 static int
557 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
558 struct nfs4_state *state, long *timeout)
559 {
560 struct nfs4_exception exception = {
561 .state = state,
562 };
563
564 if (task->tk_status >= 0)
565 return 0;
566 if (timeout)
567 exception.timeout = *timeout;
568 task->tk_status = nfs4_async_handle_exception(task, server,
569 task->tk_status,
570 &exception);
571 if (exception.delay && timeout)
572 *timeout = exception.timeout;
573 if (exception.retry)
574 return -EAGAIN;
575 return 0;
576 }
577
578 /*
579 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
580 * or 'false' otherwise.
581 */
582 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
583 {
584 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
585 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
586 }
587
588 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
589 {
590 spin_lock(&clp->cl_lock);
591 if (time_before(clp->cl_last_renewal,timestamp))
592 clp->cl_last_renewal = timestamp;
593 spin_unlock(&clp->cl_lock);
594 }
595
596 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
597 {
598 struct nfs_client *clp = server->nfs_client;
599
600 if (!nfs4_has_session(clp))
601 do_renew_lease(clp, timestamp);
602 }
603
604 struct nfs4_call_sync_data {
605 const struct nfs_server *seq_server;
606 struct nfs4_sequence_args *seq_args;
607 struct nfs4_sequence_res *seq_res;
608 };
609
610 void nfs4_init_sequence(struct nfs4_sequence_args *args,
611 struct nfs4_sequence_res *res, int cache_reply)
612 {
613 args->sa_slot = NULL;
614 args->sa_cache_this = cache_reply;
615 args->sa_privileged = 0;
616
617 res->sr_slot = NULL;
618 }
619
620 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
621 {
622 args->sa_privileged = 1;
623 }
624
625 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
626 {
627 struct nfs4_slot *slot = res->sr_slot;
628 struct nfs4_slot_table *tbl;
629
630 tbl = slot->table;
631 spin_lock(&tbl->slot_tbl_lock);
632 if (!nfs41_wake_and_assign_slot(tbl, slot))
633 nfs4_free_slot(tbl, slot);
634 spin_unlock(&tbl->slot_tbl_lock);
635
636 res->sr_slot = NULL;
637 }
638
639 static int nfs40_sequence_done(struct rpc_task *task,
640 struct nfs4_sequence_res *res)
641 {
642 if (res->sr_slot != NULL)
643 nfs40_sequence_free_slot(res);
644 return 1;
645 }
646
647 #if defined(CONFIG_NFS_V4_1)
648
649 static void nfs41_release_slot(struct nfs4_slot *slot)
650 {
651 struct nfs4_session *session;
652 struct nfs4_slot_table *tbl;
653 bool send_new_highest_used_slotid = false;
654
655 if (!slot)
656 return;
657 tbl = slot->table;
658 session = tbl->session;
659
660 /* Bump the slot sequence number */
661 if (slot->seq_done)
662 slot->seq_nr++;
663 slot->seq_done = 0;
664
665 spin_lock(&tbl->slot_tbl_lock);
666 /* Be nice to the server: try to ensure that the last transmitted
667 * value for highest_user_slotid <= target_highest_slotid
668 */
669 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
670 send_new_highest_used_slotid = true;
671
672 if (nfs41_wake_and_assign_slot(tbl, slot)) {
673 send_new_highest_used_slotid = false;
674 goto out_unlock;
675 }
676 nfs4_free_slot(tbl, slot);
677
678 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
679 send_new_highest_used_slotid = false;
680 out_unlock:
681 spin_unlock(&tbl->slot_tbl_lock);
682 if (send_new_highest_used_slotid)
683 nfs41_notify_server(session->clp);
684 if (waitqueue_active(&tbl->slot_waitq))
685 wake_up_all(&tbl->slot_waitq);
686 }
687
688 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
689 {
690 nfs41_release_slot(res->sr_slot);
691 res->sr_slot = NULL;
692 }
693
694 static int nfs41_sequence_process(struct rpc_task *task,
695 struct nfs4_sequence_res *res)
696 {
697 struct nfs4_session *session;
698 struct nfs4_slot *slot = res->sr_slot;
699 struct nfs_client *clp;
700 bool interrupted = false;
701 int ret = 1;
702
703 if (slot == NULL)
704 goto out_noaction;
705 /* don't increment the sequence number if the task wasn't sent */
706 if (!RPC_WAS_SENT(task))
707 goto out;
708
709 session = slot->table->session;
710
711 if (slot->interrupted) {
712 if (res->sr_status != -NFS4ERR_DELAY)
713 slot->interrupted = 0;
714 interrupted = true;
715 }
716
717 trace_nfs4_sequence_done(session, res);
718 /* Check the SEQUENCE operation status */
719 switch (res->sr_status) {
720 case 0:
721 /* Update the slot's sequence and clientid lease timer */
722 slot->seq_done = 1;
723 clp = session->clp;
724 do_renew_lease(clp, res->sr_timestamp);
725 /* Check sequence flags */
726 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
727 !!slot->privileged);
728 nfs41_update_target_slotid(slot->table, slot, res);
729 break;
730 case 1:
731 /*
732 * sr_status remains 1 if an RPC level error occurred.
733 * The server may or may not have processed the sequence
734 * operation..
735 * Mark the slot as having hosted an interrupted RPC call.
736 */
737 slot->interrupted = 1;
738 goto out;
739 case -NFS4ERR_DELAY:
740 /* The server detected a resend of the RPC call and
741 * returned NFS4ERR_DELAY as per Section 2.10.6.2
742 * of RFC5661.
743 */
744 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
745 __func__,
746 slot->slot_nr,
747 slot->seq_nr);
748 goto out_retry;
749 case -NFS4ERR_BADSLOT:
750 /*
751 * The slot id we used was probably retired. Try again
752 * using a different slot id.
753 */
754 if (slot->seq_nr < slot->table->target_highest_slotid)
755 goto session_recover;
756 goto retry_nowait;
757 case -NFS4ERR_SEQ_MISORDERED:
758 /*
759 * Was the last operation on this sequence interrupted?
760 * If so, retry after bumping the sequence number.
761 */
762 if (interrupted)
763 goto retry_new_seq;
764 /*
765 * Could this slot have been previously retired?
766 * If so, then the server may be expecting seq_nr = 1!
767 */
768 if (slot->seq_nr != 1) {
769 slot->seq_nr = 1;
770 goto retry_nowait;
771 }
772 goto session_recover;
773 case -NFS4ERR_SEQ_FALSE_RETRY:
774 if (interrupted)
775 goto retry_new_seq;
776 goto session_recover;
777 default:
778 /* Just update the slot sequence no. */
779 slot->seq_done = 1;
780 }
781 out:
782 /* The session may be reset by one of the error handlers. */
783 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
784 out_noaction:
785 return ret;
786 session_recover:
787 nfs4_schedule_session_recovery(session, res->sr_status);
788 goto retry_nowait;
789 retry_new_seq:
790 ++slot->seq_nr;
791 retry_nowait:
792 if (rpc_restart_call_prepare(task)) {
793 nfs41_sequence_free_slot(res);
794 task->tk_status = 0;
795 ret = 0;
796 }
797 goto out;
798 out_retry:
799 if (!rpc_restart_call(task))
800 goto out;
801 rpc_delay(task, NFS4_POLL_RETRY_MAX);
802 return 0;
803 }
804
805 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
806 {
807 if (!nfs41_sequence_process(task, res))
808 return 0;
809 if (res->sr_slot != NULL)
810 nfs41_sequence_free_slot(res);
811 return 1;
812
813 }
814 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
815
816 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
817 {
818 if (res->sr_slot == NULL)
819 return 1;
820 if (res->sr_slot->table->session != NULL)
821 return nfs41_sequence_process(task, res);
822 return nfs40_sequence_done(task, res);
823 }
824
825 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
826 {
827 if (res->sr_slot != NULL) {
828 if (res->sr_slot->table->session != NULL)
829 nfs41_sequence_free_slot(res);
830 else
831 nfs40_sequence_free_slot(res);
832 }
833 }
834
835 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
836 {
837 if (res->sr_slot == NULL)
838 return 1;
839 if (!res->sr_slot->table->session)
840 return nfs40_sequence_done(task, res);
841 return nfs41_sequence_done(task, res);
842 }
843 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
844
845 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
846 {
847 struct nfs4_call_sync_data *data = calldata;
848
849 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
850
851 nfs4_setup_sequence(data->seq_server->nfs_client,
852 data->seq_args, data->seq_res, task);
853 }
854
855 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
856 {
857 struct nfs4_call_sync_data *data = calldata;
858
859 nfs41_sequence_done(task, data->seq_res);
860 }
861
862 static const struct rpc_call_ops nfs41_call_sync_ops = {
863 .rpc_call_prepare = nfs41_call_sync_prepare,
864 .rpc_call_done = nfs41_call_sync_done,
865 };
866
867 static void
868 nfs4_sequence_process_interrupted(struct nfs_client *client,
869 struct nfs4_slot *slot, struct rpc_cred *cred)
870 {
871 struct rpc_task *task;
872
873 task = _nfs41_proc_sequence(client, cred, slot, true);
874 if (!IS_ERR(task))
875 rpc_put_task_async(task);
876 }
877
878 #else /* !CONFIG_NFS_V4_1 */
879
880 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
881 {
882 return nfs40_sequence_done(task, res);
883 }
884
885 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
886 {
887 if (res->sr_slot != NULL)
888 nfs40_sequence_free_slot(res);
889 }
890
891 int nfs4_sequence_done(struct rpc_task *task,
892 struct nfs4_sequence_res *res)
893 {
894 return nfs40_sequence_done(task, res);
895 }
896 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
897
898 static void
899 nfs4_sequence_process_interrupted(struct nfs_client *client,
900 struct nfs4_slot *slot, struct rpc_cred *cred)
901 {
902 WARN_ON_ONCE(1);
903 slot->interrupted = 0;
904 }
905
906 #endif /* !CONFIG_NFS_V4_1 */
907
908 static
909 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
910 struct nfs4_sequence_res *res,
911 struct nfs4_slot *slot)
912 {
913 if (!slot)
914 return;
915 slot->privileged = args->sa_privileged ? 1 : 0;
916 args->sa_slot = slot;
917
918 res->sr_slot = slot;
919 res->sr_timestamp = jiffies;
920 res->sr_status_flags = 0;
921 res->sr_status = 1;
922
923 }
924
925 int nfs4_setup_sequence(struct nfs_client *client,
926 struct nfs4_sequence_args *args,
927 struct nfs4_sequence_res *res,
928 struct rpc_task *task)
929 {
930 struct nfs4_session *session = nfs4_get_session(client);
931 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
932 struct nfs4_slot *slot;
933
934 /* slot already allocated? */
935 if (res->sr_slot != NULL)
936 goto out_start;
937
938 if (session) {
939 tbl = &session->fc_slot_table;
940 task->tk_timeout = 0;
941 }
942
943 for (;;) {
944 spin_lock(&tbl->slot_tbl_lock);
945 /* The state manager will wait until the slot table is empty */
946 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
947 goto out_sleep;
948
949 slot = nfs4_alloc_slot(tbl);
950 if (IS_ERR(slot)) {
951 /* Try again in 1/4 second */
952 if (slot == ERR_PTR(-ENOMEM))
953 task->tk_timeout = HZ >> 2;
954 goto out_sleep;
955 }
956 spin_unlock(&tbl->slot_tbl_lock);
957
958 if (likely(!slot->interrupted))
959 break;
960 nfs4_sequence_process_interrupted(client,
961 slot, task->tk_msg.rpc_cred);
962 }
963
964 nfs4_sequence_attach_slot(args, res, slot);
965
966 trace_nfs4_setup_sequence(session, args);
967 out_start:
968 rpc_call_start(task);
969 return 0;
970
971 out_sleep:
972 if (args->sa_privileged)
973 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
974 NULL, RPC_PRIORITY_PRIVILEGED);
975 else
976 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
977 spin_unlock(&tbl->slot_tbl_lock);
978 return -EAGAIN;
979 }
980 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
981
982 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
983 {
984 struct nfs4_call_sync_data *data = calldata;
985 nfs4_setup_sequence(data->seq_server->nfs_client,
986 data->seq_args, data->seq_res, task);
987 }
988
989 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
990 {
991 struct nfs4_call_sync_data *data = calldata;
992 nfs4_sequence_done(task, data->seq_res);
993 }
994
995 static const struct rpc_call_ops nfs40_call_sync_ops = {
996 .rpc_call_prepare = nfs40_call_sync_prepare,
997 .rpc_call_done = nfs40_call_sync_done,
998 };
999
1000 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1001 struct nfs_server *server,
1002 struct rpc_message *msg,
1003 struct nfs4_sequence_args *args,
1004 struct nfs4_sequence_res *res)
1005 {
1006 int ret;
1007 struct rpc_task *task;
1008 struct nfs_client *clp = server->nfs_client;
1009 struct nfs4_call_sync_data data = {
1010 .seq_server = server,
1011 .seq_args = args,
1012 .seq_res = res,
1013 };
1014 struct rpc_task_setup task_setup = {
1015 .rpc_client = clnt,
1016 .rpc_message = msg,
1017 .callback_ops = clp->cl_mvops->call_sync_ops,
1018 .callback_data = &data
1019 };
1020
1021 task = rpc_run_task(&task_setup);
1022 if (IS_ERR(task))
1023 ret = PTR_ERR(task);
1024 else {
1025 ret = task->tk_status;
1026 rpc_put_task(task);
1027 }
1028 return ret;
1029 }
1030
1031 int nfs4_call_sync(struct rpc_clnt *clnt,
1032 struct nfs_server *server,
1033 struct rpc_message *msg,
1034 struct nfs4_sequence_args *args,
1035 struct nfs4_sequence_res *res,
1036 int cache_reply)
1037 {
1038 nfs4_init_sequence(args, res, cache_reply);
1039 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1040 }
1041
1042 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1043 unsigned long timestamp)
1044 {
1045 struct nfs_inode *nfsi = NFS_I(dir);
1046
1047 spin_lock(&dir->i_lock);
1048 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1049 if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
1050 nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
1051 nfsi->attrtimeo_timestamp = jiffies;
1052 } else {
1053 nfs_force_lookup_revalidate(dir);
1054 if (cinfo->before != inode_peek_iversion_raw(dir))
1055 nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
1056 NFS_INO_INVALID_ACL;
1057 }
1058 inode_set_iversion_raw(dir, cinfo->after);
1059 nfsi->read_cache_jiffies = timestamp;
1060 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1061 nfs_fscache_invalidate(dir);
1062 spin_unlock(&dir->i_lock);
1063 }
1064
1065 struct nfs4_opendata {
1066 struct kref kref;
1067 struct nfs_openargs o_arg;
1068 struct nfs_openres o_res;
1069 struct nfs_open_confirmargs c_arg;
1070 struct nfs_open_confirmres c_res;
1071 struct nfs4_string owner_name;
1072 struct nfs4_string group_name;
1073 struct nfs4_label *a_label;
1074 struct nfs_fattr f_attr;
1075 struct nfs4_label *f_label;
1076 struct dentry *dir;
1077 struct dentry *dentry;
1078 struct nfs4_state_owner *owner;
1079 struct nfs4_state *state;
1080 struct iattr attrs;
1081 unsigned long timestamp;
1082 bool rpc_done;
1083 bool file_created;
1084 bool is_recover;
1085 bool cancelled;
1086 int rpc_status;
1087 };
1088
1089 struct nfs4_open_createattrs {
1090 struct nfs4_label *label;
1091 struct iattr *sattr;
1092 const __u32 verf[2];
1093 };
1094
1095 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1096 int err, struct nfs4_exception *exception)
1097 {
1098 if (err != -EINVAL)
1099 return false;
1100 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1101 return false;
1102 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1103 exception->retry = 1;
1104 return true;
1105 }
1106
1107 static u32
1108 nfs4_map_atomic_open_share(struct nfs_server *server,
1109 fmode_t fmode, int openflags)
1110 {
1111 u32 res = 0;
1112
1113 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1114 case FMODE_READ:
1115 res = NFS4_SHARE_ACCESS_READ;
1116 break;
1117 case FMODE_WRITE:
1118 res = NFS4_SHARE_ACCESS_WRITE;
1119 break;
1120 case FMODE_READ|FMODE_WRITE:
1121 res = NFS4_SHARE_ACCESS_BOTH;
1122 }
1123 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1124 goto out;
1125 /* Want no delegation if we're using O_DIRECT */
1126 if (openflags & O_DIRECT)
1127 res |= NFS4_SHARE_WANT_NO_DELEG;
1128 out:
1129 return res;
1130 }
1131
1132 static enum open_claim_type4
1133 nfs4_map_atomic_open_claim(struct nfs_server *server,
1134 enum open_claim_type4 claim)
1135 {
1136 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1137 return claim;
1138 switch (claim) {
1139 default:
1140 return claim;
1141 case NFS4_OPEN_CLAIM_FH:
1142 return NFS4_OPEN_CLAIM_NULL;
1143 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1144 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1145 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1146 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1147 }
1148 }
1149
1150 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1151 {
1152 p->o_res.f_attr = &p->f_attr;
1153 p->o_res.f_label = p->f_label;
1154 p->o_res.seqid = p->o_arg.seqid;
1155 p->c_res.seqid = p->c_arg.seqid;
1156 p->o_res.server = p->o_arg.server;
1157 p->o_res.access_request = p->o_arg.access;
1158 nfs_fattr_init(&p->f_attr);
1159 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1160 }
1161
1162 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1163 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1164 const struct nfs4_open_createattrs *c,
1165 enum open_claim_type4 claim,
1166 gfp_t gfp_mask)
1167 {
1168 struct dentry *parent = dget_parent(dentry);
1169 struct inode *dir = d_inode(parent);
1170 struct nfs_server *server = NFS_SERVER(dir);
1171 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1172 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1173 struct nfs4_opendata *p;
1174
1175 p = kzalloc(sizeof(*p), gfp_mask);
1176 if (p == NULL)
1177 goto err;
1178
1179 p->f_label = nfs4_label_alloc(server, gfp_mask);
1180 if (IS_ERR(p->f_label))
1181 goto err_free_p;
1182
1183 p->a_label = nfs4_label_alloc(server, gfp_mask);
1184 if (IS_ERR(p->a_label))
1185 goto err_free_f;
1186
1187 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1188 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1189 if (IS_ERR(p->o_arg.seqid))
1190 goto err_free_label;
1191 nfs_sb_active(dentry->d_sb);
1192 p->dentry = dget(dentry);
1193 p->dir = parent;
1194 p->owner = sp;
1195 atomic_inc(&sp->so_count);
1196 p->o_arg.open_flags = flags;
1197 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1198 p->o_arg.umask = current_umask();
1199 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1200 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1201 fmode, flags);
1202 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1203 * will return permission denied for all bits until close */
1204 if (!(flags & O_EXCL)) {
1205 /* ask server to check for all possible rights as results
1206 * are cached */
1207 switch (p->o_arg.claim) {
1208 default:
1209 break;
1210 case NFS4_OPEN_CLAIM_NULL:
1211 case NFS4_OPEN_CLAIM_FH:
1212 p->o_arg.access = NFS4_ACCESS_READ |
1213 NFS4_ACCESS_MODIFY |
1214 NFS4_ACCESS_EXTEND |
1215 NFS4_ACCESS_EXECUTE;
1216 }
1217 }
1218 p->o_arg.clientid = server->nfs_client->cl_clientid;
1219 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1220 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1221 p->o_arg.name = &dentry->d_name;
1222 p->o_arg.server = server;
1223 p->o_arg.bitmask = nfs4_bitmask(server, label);
1224 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1225 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1226 switch (p->o_arg.claim) {
1227 case NFS4_OPEN_CLAIM_NULL:
1228 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1229 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1230 p->o_arg.fh = NFS_FH(dir);
1231 break;
1232 case NFS4_OPEN_CLAIM_PREVIOUS:
1233 case NFS4_OPEN_CLAIM_FH:
1234 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1235 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1236 p->o_arg.fh = NFS_FH(d_inode(dentry));
1237 }
1238 if (c != NULL && c->sattr != NULL && c->sattr->ia_valid != 0) {
1239 p->o_arg.u.attrs = &p->attrs;
1240 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1241
1242 memcpy(p->o_arg.u.verifier.data, c->verf,
1243 sizeof(p->o_arg.u.verifier.data));
1244 }
1245 p->c_arg.fh = &p->o_res.fh;
1246 p->c_arg.stateid = &p->o_res.stateid;
1247 p->c_arg.seqid = p->o_arg.seqid;
1248 nfs4_init_opendata_res(p);
1249 kref_init(&p->kref);
1250 return p;
1251
1252 err_free_label:
1253 nfs4_label_free(p->a_label);
1254 err_free_f:
1255 nfs4_label_free(p->f_label);
1256 err_free_p:
1257 kfree(p);
1258 err:
1259 dput(parent);
1260 return NULL;
1261 }
1262
1263 static void nfs4_opendata_free(struct kref *kref)
1264 {
1265 struct nfs4_opendata *p = container_of(kref,
1266 struct nfs4_opendata, kref);
1267 struct super_block *sb = p->dentry->d_sb;
1268
1269 nfs_free_seqid(p->o_arg.seqid);
1270 nfs4_sequence_free_slot(&p->o_res.seq_res);
1271 if (p->state != NULL)
1272 nfs4_put_open_state(p->state);
1273 nfs4_put_state_owner(p->owner);
1274
1275 nfs4_label_free(p->a_label);
1276 nfs4_label_free(p->f_label);
1277
1278 dput(p->dir);
1279 dput(p->dentry);
1280 nfs_sb_deactive(sb);
1281 nfs_fattr_free_names(&p->f_attr);
1282 kfree(p->f_attr.mdsthreshold);
1283 kfree(p);
1284 }
1285
1286 static void nfs4_opendata_put(struct nfs4_opendata *p)
1287 {
1288 if (p != NULL)
1289 kref_put(&p->kref, nfs4_opendata_free);
1290 }
1291
1292 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1293 fmode_t fmode)
1294 {
1295 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1296 case FMODE_READ|FMODE_WRITE:
1297 return state->n_rdwr != 0;
1298 case FMODE_WRITE:
1299 return state->n_wronly != 0;
1300 case FMODE_READ:
1301 return state->n_rdonly != 0;
1302 }
1303 WARN_ON_ONCE(1);
1304 return false;
1305 }
1306
1307 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1308 {
1309 int ret = 0;
1310
1311 if (open_mode & (O_EXCL|O_TRUNC))
1312 goto out;
1313 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1314 case FMODE_READ:
1315 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1316 && state->n_rdonly != 0;
1317 break;
1318 case FMODE_WRITE:
1319 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1320 && state->n_wronly != 0;
1321 break;
1322 case FMODE_READ|FMODE_WRITE:
1323 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1324 && state->n_rdwr != 0;
1325 }
1326 out:
1327 return ret;
1328 }
1329
1330 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1331 enum open_claim_type4 claim)
1332 {
1333 if (delegation == NULL)
1334 return 0;
1335 if ((delegation->type & fmode) != fmode)
1336 return 0;
1337 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1338 return 0;
1339 switch (claim) {
1340 case NFS4_OPEN_CLAIM_NULL:
1341 case NFS4_OPEN_CLAIM_FH:
1342 break;
1343 case NFS4_OPEN_CLAIM_PREVIOUS:
1344 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1345 break;
1346 default:
1347 return 0;
1348 }
1349 nfs_mark_delegation_referenced(delegation);
1350 return 1;
1351 }
1352
1353 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1354 {
1355 switch (fmode) {
1356 case FMODE_WRITE:
1357 state->n_wronly++;
1358 break;
1359 case FMODE_READ:
1360 state->n_rdonly++;
1361 break;
1362 case FMODE_READ|FMODE_WRITE:
1363 state->n_rdwr++;
1364 }
1365 nfs4_state_set_mode_locked(state, state->state | fmode);
1366 }
1367
1368 #ifdef CONFIG_NFS_V4_1
1369 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1370 {
1371 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1372 return true;
1373 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1374 return true;
1375 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1376 return true;
1377 return false;
1378 }
1379 #endif /* CONFIG_NFS_V4_1 */
1380
1381 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1382 {
1383 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1384 wake_up_all(&state->waitq);
1385 }
1386
1387 static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state,
1388 const nfs4_stateid *stateid)
1389 {
1390 u32 state_seqid = be32_to_cpu(state->open_stateid.seqid);
1391 u32 stateid_seqid = be32_to_cpu(stateid->seqid);
1392
1393 if (stateid_seqid == state_seqid + 1U ||
1394 (stateid_seqid == 1U && state_seqid == 0xffffffffU))
1395 nfs_state_log_update_open_stateid(state);
1396 else
1397 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1398 }
1399
1400 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1401 {
1402 struct nfs_client *clp = state->owner->so_server->nfs_client;
1403 bool need_recover = false;
1404
1405 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1406 need_recover = true;
1407 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1408 need_recover = true;
1409 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1410 need_recover = true;
1411 if (need_recover)
1412 nfs4_state_mark_reclaim_nograce(clp, state);
1413 }
1414
1415 /*
1416 * Check for whether or not the caller may update the open stateid
1417 * to the value passed in by stateid.
1418 *
1419 * Note: This function relies heavily on the server implementing
1420 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1421 * correctly.
1422 * i.e. The stateid seqids have to be initialised to 1, and
1423 * are then incremented on every state transition.
1424 */
1425 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1426 const nfs4_stateid *stateid)
1427 {
1428 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 ||
1429 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1430 if (stateid->seqid == cpu_to_be32(1))
1431 nfs_state_log_update_open_stateid(state);
1432 else
1433 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1434 return true;
1435 }
1436
1437 if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1438 nfs_state_log_out_of_order_open_stateid(state, stateid);
1439 return true;
1440 }
1441 return false;
1442 }
1443
1444 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1445 {
1446 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1447 return;
1448 if (state->n_wronly)
1449 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1450 if (state->n_rdonly)
1451 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1452 if (state->n_rdwr)
1453 set_bit(NFS_O_RDWR_STATE, &state->flags);
1454 set_bit(NFS_OPEN_STATE, &state->flags);
1455 }
1456
1457 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1458 nfs4_stateid *stateid, fmode_t fmode)
1459 {
1460 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1461 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1462 case FMODE_WRITE:
1463 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1464 break;
1465 case FMODE_READ:
1466 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1467 break;
1468 case 0:
1469 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1470 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1471 clear_bit(NFS_OPEN_STATE, &state->flags);
1472 }
1473 if (stateid == NULL)
1474 return;
1475 /* Handle OPEN+OPEN_DOWNGRADE races */
1476 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1477 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1478 nfs_resync_open_stateid_locked(state);
1479 goto out;
1480 }
1481 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1482 nfs4_stateid_copy(&state->stateid, stateid);
1483 nfs4_stateid_copy(&state->open_stateid, stateid);
1484 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1485 out:
1486 nfs_state_log_update_open_stateid(state);
1487 }
1488
1489 static void nfs_clear_open_stateid(struct nfs4_state *state,
1490 nfs4_stateid *arg_stateid,
1491 nfs4_stateid *stateid, fmode_t fmode)
1492 {
1493 write_seqlock(&state->seqlock);
1494 /* Ignore, if the CLOSE argment doesn't match the current stateid */
1495 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1496 nfs_clear_open_stateid_locked(state, stateid, fmode);
1497 write_sequnlock(&state->seqlock);
1498 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1499 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1500 }
1501
1502 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1503 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1504 {
1505 DEFINE_WAIT(wait);
1506 int status = 0;
1507 for (;;) {
1508
1509 if (!nfs_need_update_open_stateid(state, stateid))
1510 return;
1511 if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1512 break;
1513 if (status)
1514 break;
1515 /* Rely on seqids for serialisation with NFSv4.0 */
1516 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1517 break;
1518
1519 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1520 /*
1521 * Ensure we process the state changes in the same order
1522 * in which the server processed them by delaying the
1523 * update of the stateid until we are in sequence.
1524 */
1525 write_sequnlock(&state->seqlock);
1526 spin_unlock(&state->owner->so_lock);
1527 rcu_read_unlock();
1528 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1529 if (!signal_pending(current)) {
1530 if (schedule_timeout(5*HZ) == 0)
1531 status = -EAGAIN;
1532 else
1533 status = 0;
1534 } else
1535 status = -EINTR;
1536 finish_wait(&state->waitq, &wait);
1537 rcu_read_lock();
1538 spin_lock(&state->owner->so_lock);
1539 write_seqlock(&state->seqlock);
1540 }
1541
1542 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1543 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1544 nfs4_stateid_copy(freeme, &state->open_stateid);
1545 nfs_test_and_clear_all_open_stateid(state);
1546 }
1547
1548 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1549 nfs4_stateid_copy(&state->stateid, stateid);
1550 nfs4_stateid_copy(&state->open_stateid, stateid);
1551 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1552 nfs_state_log_update_open_stateid(state);
1553 }
1554
1555 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1556 const nfs4_stateid *open_stateid,
1557 fmode_t fmode,
1558 nfs4_stateid *freeme)
1559 {
1560 /*
1561 * Protect the call to nfs4_state_set_mode_locked and
1562 * serialise the stateid update
1563 */
1564 write_seqlock(&state->seqlock);
1565 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1566 switch (fmode) {
1567 case FMODE_READ:
1568 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1569 break;
1570 case FMODE_WRITE:
1571 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1572 break;
1573 case FMODE_READ|FMODE_WRITE:
1574 set_bit(NFS_O_RDWR_STATE, &state->flags);
1575 }
1576 set_bit(NFS_OPEN_STATE, &state->flags);
1577 write_sequnlock(&state->seqlock);
1578 }
1579
1580 static void nfs_state_set_delegation(struct nfs4_state *state,
1581 const nfs4_stateid *deleg_stateid,
1582 fmode_t fmode)
1583 {
1584 /*
1585 * Protect the call to nfs4_state_set_mode_locked and
1586 * serialise the stateid update
1587 */
1588 write_seqlock(&state->seqlock);
1589 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1590 set_bit(NFS_DELEGATED_STATE, &state->flags);
1591 write_sequnlock(&state->seqlock);
1592 }
1593
1594 static int update_open_stateid(struct nfs4_state *state,
1595 const nfs4_stateid *open_stateid,
1596 const nfs4_stateid *delegation,
1597 fmode_t fmode)
1598 {
1599 struct nfs_server *server = NFS_SERVER(state->inode);
1600 struct nfs_client *clp = server->nfs_client;
1601 struct nfs_inode *nfsi = NFS_I(state->inode);
1602 struct nfs_delegation *deleg_cur;
1603 nfs4_stateid freeme = { };
1604 int ret = 0;
1605
1606 fmode &= (FMODE_READ|FMODE_WRITE);
1607
1608 rcu_read_lock();
1609 spin_lock(&state->owner->so_lock);
1610 if (open_stateid != NULL) {
1611 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1612 ret = 1;
1613 }
1614
1615 deleg_cur = rcu_dereference(nfsi->delegation);
1616 if (deleg_cur == NULL)
1617 goto no_delegation;
1618
1619 spin_lock(&deleg_cur->lock);
1620 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1621 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1622 (deleg_cur->type & fmode) != fmode)
1623 goto no_delegation_unlock;
1624
1625 if (delegation == NULL)
1626 delegation = &deleg_cur->stateid;
1627 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1628 goto no_delegation_unlock;
1629
1630 nfs_mark_delegation_referenced(deleg_cur);
1631 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1632 ret = 1;
1633 no_delegation_unlock:
1634 spin_unlock(&deleg_cur->lock);
1635 no_delegation:
1636 if (ret)
1637 update_open_stateflags(state, fmode);
1638 spin_unlock(&state->owner->so_lock);
1639 rcu_read_unlock();
1640
1641 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1642 nfs4_schedule_state_manager(clp);
1643 if (freeme.type != 0)
1644 nfs4_test_and_free_stateid(server, &freeme,
1645 state->owner->so_cred);
1646
1647 return ret;
1648 }
1649
1650 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1651 const nfs4_stateid *stateid)
1652 {
1653 struct nfs4_state *state = lsp->ls_state;
1654 bool ret = false;
1655
1656 spin_lock(&state->state_lock);
1657 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1658 goto out_noupdate;
1659 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1660 goto out_noupdate;
1661 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1662 ret = true;
1663 out_noupdate:
1664 spin_unlock(&state->state_lock);
1665 return ret;
1666 }
1667
1668 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1669 {
1670 struct nfs_delegation *delegation;
1671
1672 rcu_read_lock();
1673 delegation = rcu_dereference(NFS_I(inode)->delegation);
1674 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1675 rcu_read_unlock();
1676 return;
1677 }
1678 rcu_read_unlock();
1679 nfs4_inode_return_delegation(inode);
1680 }
1681
1682 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1683 {
1684 struct nfs4_state *state = opendata->state;
1685 struct nfs_inode *nfsi = NFS_I(state->inode);
1686 struct nfs_delegation *delegation;
1687 int open_mode = opendata->o_arg.open_flags;
1688 fmode_t fmode = opendata->o_arg.fmode;
1689 enum open_claim_type4 claim = opendata->o_arg.claim;
1690 nfs4_stateid stateid;
1691 int ret = -EAGAIN;
1692
1693 for (;;) {
1694 spin_lock(&state->owner->so_lock);
1695 if (can_open_cached(state, fmode, open_mode)) {
1696 update_open_stateflags(state, fmode);
1697 spin_unlock(&state->owner->so_lock);
1698 goto out_return_state;
1699 }
1700 spin_unlock(&state->owner->so_lock);
1701 rcu_read_lock();
1702 delegation = rcu_dereference(nfsi->delegation);
1703 if (!can_open_delegated(delegation, fmode, claim)) {
1704 rcu_read_unlock();
1705 break;
1706 }
1707 /* Save the delegation */
1708 nfs4_stateid_copy(&stateid, &delegation->stateid);
1709 rcu_read_unlock();
1710 nfs_release_seqid(opendata->o_arg.seqid);
1711 if (!opendata->is_recover) {
1712 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1713 if (ret != 0)
1714 goto out;
1715 }
1716 ret = -EAGAIN;
1717
1718 /* Try to update the stateid using the delegation */
1719 if (update_open_stateid(state, NULL, &stateid, fmode))
1720 goto out_return_state;
1721 }
1722 out:
1723 return ERR_PTR(ret);
1724 out_return_state:
1725 atomic_inc(&state->count);
1726 return state;
1727 }
1728
1729 static void
1730 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1731 {
1732 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1733 struct nfs_delegation *delegation;
1734 int delegation_flags = 0;
1735
1736 rcu_read_lock();
1737 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1738 if (delegation)
1739 delegation_flags = delegation->flags;
1740 rcu_read_unlock();
1741 switch (data->o_arg.claim) {
1742 default:
1743 break;
1744 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1745 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1746 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1747 "returning a delegation for "
1748 "OPEN(CLAIM_DELEGATE_CUR)\n",
1749 clp->cl_hostname);
1750 return;
1751 }
1752 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1753 nfs_inode_set_delegation(state->inode,
1754 data->owner->so_cred,
1755 &data->o_res);
1756 else
1757 nfs_inode_reclaim_delegation(state->inode,
1758 data->owner->so_cred,
1759 &data->o_res);
1760 }
1761
1762 /*
1763 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1764 * and update the nfs4_state.
1765 */
1766 static struct nfs4_state *
1767 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1768 {
1769 struct inode *inode = data->state->inode;
1770 struct nfs4_state *state = data->state;
1771 int ret;
1772
1773 if (!data->rpc_done) {
1774 if (data->rpc_status)
1775 return ERR_PTR(data->rpc_status);
1776 /* cached opens have already been processed */
1777 goto update;
1778 }
1779
1780 ret = nfs_refresh_inode(inode, &data->f_attr);
1781 if (ret)
1782 return ERR_PTR(ret);
1783
1784 if (data->o_res.delegation_type != 0)
1785 nfs4_opendata_check_deleg(data, state);
1786 update:
1787 update_open_stateid(state, &data->o_res.stateid, NULL,
1788 data->o_arg.fmode);
1789 atomic_inc(&state->count);
1790
1791 return state;
1792 }
1793
1794 static struct inode *
1795 nfs4_opendata_get_inode(struct nfs4_opendata *data)
1796 {
1797 struct inode *inode;
1798
1799 switch (data->o_arg.claim) {
1800 case NFS4_OPEN_CLAIM_NULL:
1801 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1802 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1803 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1804 return ERR_PTR(-EAGAIN);
1805 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
1806 &data->f_attr, data->f_label);
1807 break;
1808 default:
1809 inode = d_inode(data->dentry);
1810 ihold(inode);
1811 nfs_refresh_inode(inode, &data->f_attr);
1812 }
1813 return inode;
1814 }
1815
1816 static struct nfs4_state *
1817 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
1818 {
1819 struct nfs4_state *state;
1820 struct inode *inode;
1821
1822 inode = nfs4_opendata_get_inode(data);
1823 if (IS_ERR(inode))
1824 return ERR_CAST(inode);
1825 if (data->state != NULL && data->state->inode == inode) {
1826 state = data->state;
1827 atomic_inc(&state->count);
1828 } else
1829 state = nfs4_get_open_state(inode, data->owner);
1830 iput(inode);
1831 if (state == NULL)
1832 state = ERR_PTR(-ENOMEM);
1833 return state;
1834 }
1835
1836 static struct nfs4_state *
1837 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1838 {
1839 struct nfs4_state *state;
1840
1841 if (!data->rpc_done) {
1842 state = nfs4_try_open_cached(data);
1843 trace_nfs4_cached_open(data->state);
1844 goto out;
1845 }
1846
1847 state = nfs4_opendata_find_nfs4_state(data);
1848 if (IS_ERR(state))
1849 goto out;
1850
1851 if (data->o_res.delegation_type != 0)
1852 nfs4_opendata_check_deleg(data, state);
1853 update_open_stateid(state, &data->o_res.stateid, NULL,
1854 data->o_arg.fmode);
1855 out:
1856 nfs_release_seqid(data->o_arg.seqid);
1857 return state;
1858 }
1859
1860 static struct nfs4_state *
1861 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1862 {
1863 struct nfs4_state *ret;
1864
1865 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1866 ret =_nfs4_opendata_reclaim_to_nfs4_state(data);
1867 else
1868 ret = _nfs4_opendata_to_nfs4_state(data);
1869 nfs4_sequence_free_slot(&data->o_res.seq_res);
1870 return ret;
1871 }
1872
1873 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1874 {
1875 struct nfs_inode *nfsi = NFS_I(state->inode);
1876 struct nfs_open_context *ctx;
1877
1878 spin_lock(&state->inode->i_lock);
1879 list_for_each_entry(ctx, &nfsi->open_files, list) {
1880 if (ctx->state != state)
1881 continue;
1882 get_nfs_open_context(ctx);
1883 spin_unlock(&state->inode->i_lock);
1884 return ctx;
1885 }
1886 spin_unlock(&state->inode->i_lock);
1887 return ERR_PTR(-ENOENT);
1888 }
1889
1890 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1891 struct nfs4_state *state, enum open_claim_type4 claim)
1892 {
1893 struct nfs4_opendata *opendata;
1894
1895 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1896 NULL, claim, GFP_NOFS);
1897 if (opendata == NULL)
1898 return ERR_PTR(-ENOMEM);
1899 opendata->state = state;
1900 atomic_inc(&state->count);
1901 return opendata;
1902 }
1903
1904 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1905 fmode_t fmode)
1906 {
1907 struct nfs4_state *newstate;
1908 int ret;
1909
1910 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1911 return 0;
1912 opendata->o_arg.open_flags = 0;
1913 opendata->o_arg.fmode = fmode;
1914 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1915 NFS_SB(opendata->dentry->d_sb),
1916 fmode, 0);
1917 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1918 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1919 nfs4_init_opendata_res(opendata);
1920 ret = _nfs4_recover_proc_open(opendata);
1921 if (ret != 0)
1922 return ret;
1923 newstate = nfs4_opendata_to_nfs4_state(opendata);
1924 if (IS_ERR(newstate))
1925 return PTR_ERR(newstate);
1926 if (newstate != opendata->state)
1927 ret = -ESTALE;
1928 nfs4_close_state(newstate, fmode);
1929 return ret;
1930 }
1931
1932 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1933 {
1934 int ret;
1935
1936 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1937 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1938 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1939 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1940 /* memory barrier prior to reading state->n_* */
1941 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1942 clear_bit(NFS_OPEN_STATE, &state->flags);
1943 smp_rmb();
1944 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1945 if (ret != 0)
1946 return ret;
1947 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1948 if (ret != 0)
1949 return ret;
1950 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1951 if (ret != 0)
1952 return ret;
1953 /*
1954 * We may have performed cached opens for all three recoveries.
1955 * Check if we need to update the current stateid.
1956 */
1957 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1958 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1959 write_seqlock(&state->seqlock);
1960 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1961 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1962 write_sequnlock(&state->seqlock);
1963 }
1964 return 0;
1965 }
1966
1967 /*
1968 * OPEN_RECLAIM:
1969 * reclaim state on the server after a reboot.
1970 */
1971 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1972 {
1973 struct nfs_delegation *delegation;
1974 struct nfs4_opendata *opendata;
1975 fmode_t delegation_type = 0;
1976 int status;
1977
1978 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1979 NFS4_OPEN_CLAIM_PREVIOUS);
1980 if (IS_ERR(opendata))
1981 return PTR_ERR(opendata);
1982 rcu_read_lock();
1983 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1984 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1985 delegation_type = delegation->type;
1986 rcu_read_unlock();
1987 opendata->o_arg.u.delegation_type = delegation_type;
1988 status = nfs4_open_recover(opendata, state);
1989 nfs4_opendata_put(opendata);
1990 return status;
1991 }
1992
1993 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1994 {
1995 struct nfs_server *server = NFS_SERVER(state->inode);
1996 struct nfs4_exception exception = { };
1997 int err;
1998 do {
1999 err = _nfs4_do_open_reclaim(ctx, state);
2000 trace_nfs4_open_reclaim(ctx, 0, err);
2001 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2002 continue;
2003 if (err != -NFS4ERR_DELAY)
2004 break;
2005 nfs4_handle_exception(server, err, &exception);
2006 } while (exception.retry);
2007 return err;
2008 }
2009
2010 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2011 {
2012 struct nfs_open_context *ctx;
2013 int ret;
2014
2015 ctx = nfs4_state_find_open_context(state);
2016 if (IS_ERR(ctx))
2017 return -EAGAIN;
2018 ret = nfs4_do_open_reclaim(ctx, state);
2019 put_nfs_open_context(ctx);
2020 return ret;
2021 }
2022
2023 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2024 {
2025 switch (err) {
2026 default:
2027 printk(KERN_ERR "NFS: %s: unhandled error "
2028 "%d.\n", __func__, err);
2029 case 0:
2030 case -ENOENT:
2031 case -EAGAIN:
2032 case -ESTALE:
2033 break;
2034 case -NFS4ERR_BADSESSION:
2035 case -NFS4ERR_BADSLOT:
2036 case -NFS4ERR_BAD_HIGH_SLOT:
2037 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2038 case -NFS4ERR_DEADSESSION:
2039 set_bit(NFS_DELEGATED_STATE, &state->flags);
2040 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
2041 return -EAGAIN;
2042 case -NFS4ERR_STALE_CLIENTID:
2043 case -NFS4ERR_STALE_STATEID:
2044 set_bit(NFS_DELEGATED_STATE, &state->flags);
2045 /* Don't recall a delegation if it was lost */
2046 nfs4_schedule_lease_recovery(server->nfs_client);
2047 return -EAGAIN;
2048 case -NFS4ERR_MOVED:
2049 nfs4_schedule_migration_recovery(server);
2050 return -EAGAIN;
2051 case -NFS4ERR_LEASE_MOVED:
2052 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2053 return -EAGAIN;
2054 case -NFS4ERR_DELEG_REVOKED:
2055 case -NFS4ERR_ADMIN_REVOKED:
2056 case -NFS4ERR_EXPIRED:
2057 case -NFS4ERR_BAD_STATEID:
2058 case -NFS4ERR_OPENMODE:
2059 nfs_inode_find_state_and_recover(state->inode,
2060 stateid);
2061 nfs4_schedule_stateid_recovery(server, state);
2062 return -EAGAIN;
2063 case -NFS4ERR_DELAY:
2064 case -NFS4ERR_GRACE:
2065 set_bit(NFS_DELEGATED_STATE, &state->flags);
2066 ssleep(1);
2067 return -EAGAIN;
2068 case -ENOMEM:
2069 case -NFS4ERR_DENIED:
2070 if (fl) {
2071 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2072 if (lsp)
2073 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2074 }
2075 return 0;
2076 }
2077 return err;
2078 }
2079
2080 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2081 struct nfs4_state *state, const nfs4_stateid *stateid,
2082 fmode_t type)
2083 {
2084 struct nfs_server *server = NFS_SERVER(state->inode);
2085 struct nfs4_opendata *opendata;
2086 int err = 0;
2087
2088 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2089 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2090 if (IS_ERR(opendata))
2091 return PTR_ERR(opendata);
2092 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2093 write_seqlock(&state->seqlock);
2094 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2095 write_sequnlock(&state->seqlock);
2096 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2097 switch (type & (FMODE_READ|FMODE_WRITE)) {
2098 case FMODE_READ|FMODE_WRITE:
2099 case FMODE_WRITE:
2100 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2101 if (err)
2102 break;
2103 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2104 if (err)
2105 break;
2106 case FMODE_READ:
2107 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2108 }
2109 nfs4_opendata_put(opendata);
2110 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2111 }
2112
2113 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2114 {
2115 struct nfs4_opendata *data = calldata;
2116
2117 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2118 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2119 }
2120
2121 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2122 {
2123 struct nfs4_opendata *data = calldata;
2124
2125 nfs40_sequence_done(task, &data->c_res.seq_res);
2126
2127 data->rpc_status = task->tk_status;
2128 if (data->rpc_status == 0) {
2129 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2130 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2131 renew_lease(data->o_res.server, data->timestamp);
2132 data->rpc_done = true;
2133 }
2134 }
2135
2136 static void nfs4_open_confirm_release(void *calldata)
2137 {
2138 struct nfs4_opendata *data = calldata;
2139 struct nfs4_state *state = NULL;
2140
2141 /* If this request hasn't been cancelled, do nothing */
2142 if (!data->cancelled)
2143 goto out_free;
2144 /* In case of error, no cleanup! */
2145 if (!data->rpc_done)
2146 goto out_free;
2147 state = nfs4_opendata_to_nfs4_state(data);
2148 if (!IS_ERR(state))
2149 nfs4_close_state(state, data->o_arg.fmode);
2150 out_free:
2151 nfs4_opendata_put(data);
2152 }
2153
2154 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2155 .rpc_call_prepare = nfs4_open_confirm_prepare,
2156 .rpc_call_done = nfs4_open_confirm_done,
2157 .rpc_release = nfs4_open_confirm_release,
2158 };
2159
2160 /*
2161 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2162 */
2163 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2164 {
2165 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2166 struct rpc_task *task;
2167 struct rpc_message msg = {
2168 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2169 .rpc_argp = &data->c_arg,
2170 .rpc_resp = &data->c_res,
2171 .rpc_cred = data->owner->so_cred,
2172 };
2173 struct rpc_task_setup task_setup_data = {
2174 .rpc_client = server->client,
2175 .rpc_message = &msg,
2176 .callback_ops = &nfs4_open_confirm_ops,
2177 .callback_data = data,
2178 .workqueue = nfsiod_workqueue,
2179 .flags = RPC_TASK_ASYNC,
2180 };
2181 int status;
2182
2183 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
2184 kref_get(&data->kref);
2185 data->rpc_done = false;
2186 data->rpc_status = 0;
2187 data->timestamp = jiffies;
2188 if (data->is_recover)
2189 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
2190 task = rpc_run_task(&task_setup_data);
2191 if (IS_ERR(task))
2192 return PTR_ERR(task);
2193 status = rpc_wait_for_completion_task(task);
2194 if (status != 0) {
2195 data->cancelled = true;
2196 smp_wmb();
2197 } else
2198 status = data->rpc_status;
2199 rpc_put_task(task);
2200 return status;
2201 }
2202
2203 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2204 {
2205 struct nfs4_opendata *data = calldata;
2206 struct nfs4_state_owner *sp = data->owner;
2207 struct nfs_client *clp = sp->so_server->nfs_client;
2208 enum open_claim_type4 claim = data->o_arg.claim;
2209
2210 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2211 goto out_wait;
2212 /*
2213 * Check if we still need to send an OPEN call, or if we can use
2214 * a delegation instead.
2215 */
2216 if (data->state != NULL) {
2217 struct nfs_delegation *delegation;
2218
2219 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
2220 goto out_no_action;
2221 rcu_read_lock();
2222 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
2223 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2224 goto unlock_no_action;
2225 rcu_read_unlock();
2226 }
2227 /* Update client id. */
2228 data->o_arg.clientid = clp->cl_clientid;
2229 switch (claim) {
2230 default:
2231 break;
2232 case NFS4_OPEN_CLAIM_PREVIOUS:
2233 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2234 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2235 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2236 case NFS4_OPEN_CLAIM_FH:
2237 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2238 }
2239 data->timestamp = jiffies;
2240 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2241 &data->o_arg.seq_args,
2242 &data->o_res.seq_res,
2243 task) != 0)
2244 nfs_release_seqid(data->o_arg.seqid);
2245
2246 /* Set the create mode (note dependency on the session type) */
2247 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2248 if (data->o_arg.open_flags & O_EXCL) {
2249 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2250 if (nfs4_has_persistent_session(clp))
2251 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2252 else if (clp->cl_mvops->minor_version > 0)
2253 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2254 }
2255 return;
2256 unlock_no_action:
2257 trace_nfs4_cached_open(data->state);
2258 rcu_read_unlock();
2259 out_no_action:
2260 task->tk_action = NULL;
2261 out_wait:
2262 nfs4_sequence_done(task, &data->o_res.seq_res);
2263 }
2264
2265 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2266 {
2267 struct nfs4_opendata *data = calldata;
2268
2269 data->rpc_status = task->tk_status;
2270
2271 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2272 return;
2273
2274 if (task->tk_status == 0) {
2275 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2276 switch (data->o_res.f_attr->mode & S_IFMT) {
2277 case S_IFREG:
2278 break;
2279 case S_IFLNK:
2280 data->rpc_status = -ELOOP;
2281 break;
2282 case S_IFDIR:
2283 data->rpc_status = -EISDIR;
2284 break;
2285 default:
2286 data->rpc_status = -ENOTDIR;
2287 }
2288 }
2289 renew_lease(data->o_res.server, data->timestamp);
2290 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2291 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2292 }
2293 data->rpc_done = true;
2294 }
2295
2296 static void nfs4_open_release(void *calldata)
2297 {
2298 struct nfs4_opendata *data = calldata;
2299 struct nfs4_state *state = NULL;
2300
2301 /* If this request hasn't been cancelled, do nothing */
2302 if (!data->cancelled)
2303 goto out_free;
2304 /* In case of error, no cleanup! */
2305 if (data->rpc_status != 0 || !data->rpc_done)
2306 goto out_free;
2307 /* In case we need an open_confirm, no cleanup! */
2308 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2309 goto out_free;
2310 state = nfs4_opendata_to_nfs4_state(data);
2311 if (!IS_ERR(state))
2312 nfs4_close_state(state, data->o_arg.fmode);
2313 out_free:
2314 nfs4_opendata_put(data);
2315 }
2316
2317 static const struct rpc_call_ops nfs4_open_ops = {
2318 .rpc_call_prepare = nfs4_open_prepare,
2319 .rpc_call_done = nfs4_open_done,
2320 .rpc_release = nfs4_open_release,
2321 };
2322
2323 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2324 {
2325 struct inode *dir = d_inode(data->dir);
2326 struct nfs_server *server = NFS_SERVER(dir);
2327 struct nfs_openargs *o_arg = &data->o_arg;
2328 struct nfs_openres *o_res = &data->o_res;
2329 struct rpc_task *task;
2330 struct rpc_message msg = {
2331 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2332 .rpc_argp = o_arg,
2333 .rpc_resp = o_res,
2334 .rpc_cred = data->owner->so_cred,
2335 };
2336 struct rpc_task_setup task_setup_data = {
2337 .rpc_client = server->client,
2338 .rpc_message = &msg,
2339 .callback_ops = &nfs4_open_ops,
2340 .callback_data = data,
2341 .workqueue = nfsiod_workqueue,
2342 .flags = RPC_TASK_ASYNC,
2343 };
2344 int status;
2345
2346 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2347 kref_get(&data->kref);
2348 data->rpc_done = false;
2349 data->rpc_status = 0;
2350 data->cancelled = false;
2351 data->is_recover = false;
2352 if (isrecover) {
2353 nfs4_set_sequence_privileged(&o_arg->seq_args);
2354 data->is_recover = true;
2355 }
2356 task = rpc_run_task(&task_setup_data);
2357 if (IS_ERR(task))
2358 return PTR_ERR(task);
2359 status = rpc_wait_for_completion_task(task);
2360 if (status != 0) {
2361 data->cancelled = true;
2362 smp_wmb();
2363 } else
2364 status = data->rpc_status;
2365 rpc_put_task(task);
2366
2367 return status;
2368 }
2369
2370 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2371 {
2372 struct inode *dir = d_inode(data->dir);
2373 struct nfs_openres *o_res = &data->o_res;
2374 int status;
2375
2376 status = nfs4_run_open_task(data, 1);
2377 if (status != 0 || !data->rpc_done)
2378 return status;
2379
2380 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2381
2382 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2383 status = _nfs4_proc_open_confirm(data);
2384
2385 return status;
2386 }
2387
2388 /*
2389 * Additional permission checks in order to distinguish between an
2390 * open for read, and an open for execute. This works around the
2391 * fact that NFSv4 OPEN treats read and execute permissions as being
2392 * the same.
2393 * Note that in the non-execute case, we want to turn off permission
2394 * checking if we just created a new file (POSIX open() semantics).
2395 */
2396 static int nfs4_opendata_access(struct rpc_cred *cred,
2397 struct nfs4_opendata *opendata,
2398 struct nfs4_state *state, fmode_t fmode,
2399 int openflags)
2400 {
2401 struct nfs_access_entry cache;
2402 u32 mask, flags;
2403
2404 /* access call failed or for some reason the server doesn't
2405 * support any access modes -- defer access call until later */
2406 if (opendata->o_res.access_supported == 0)
2407 return 0;
2408
2409 mask = 0;
2410 /*
2411 * Use openflags to check for exec, because fmode won't
2412 * always have FMODE_EXEC set when file open for exec.
2413 */
2414 if (openflags & __FMODE_EXEC) {
2415 /* ONLY check for exec rights */
2416 if (S_ISDIR(state->inode->i_mode))
2417 mask = NFS4_ACCESS_LOOKUP;
2418 else
2419 mask = NFS4_ACCESS_EXECUTE;
2420 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2421 mask = NFS4_ACCESS_READ;
2422
2423 cache.cred = cred;
2424 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2425 nfs_access_add_cache(state->inode, &cache);
2426
2427 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2428 if ((mask & ~cache.mask & flags) == 0)
2429 return 0;
2430
2431 return -EACCES;
2432 }
2433
2434 /*
2435 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2436 */
2437 static int _nfs4_proc_open(struct nfs4_opendata *data)
2438 {
2439 struct inode *dir = d_inode(data->dir);
2440 struct nfs_server *server = NFS_SERVER(dir);
2441 struct nfs_openargs *o_arg = &data->o_arg;
2442 struct nfs_openres *o_res = &data->o_res;
2443 int status;
2444
2445 status = nfs4_run_open_task(data, 0);
2446 if (!data->rpc_done)
2447 return status;
2448 if (status != 0) {
2449 if (status == -NFS4ERR_BADNAME &&
2450 !(o_arg->open_flags & O_CREAT))
2451 return -ENOENT;
2452 return status;
2453 }
2454
2455 nfs_fattr_map_and_free_names(server, &data->f_attr);
2456
2457 if (o_arg->open_flags & O_CREAT) {
2458 if (o_arg->open_flags & O_EXCL)
2459 data->file_created = true;
2460 else if (o_res->cinfo.before != o_res->cinfo.after)
2461 data->file_created = true;
2462 if (data->file_created ||
2463 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2464 update_changeattr(dir, &o_res->cinfo,
2465 o_res->f_attr->time_start);
2466 }
2467 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2468 server->caps &= ~NFS_CAP_POSIX_LOCK;
2469 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2470 status = _nfs4_proc_open_confirm(data);
2471 if (status != 0)
2472 return status;
2473 }
2474 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2475 nfs4_sequence_free_slot(&o_res->seq_res);
2476 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2477 }
2478 return 0;
2479 }
2480
2481 /*
2482 * OPEN_EXPIRED:
2483 * reclaim state on the server after a network partition.
2484 * Assumes caller holds the appropriate lock
2485 */
2486 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2487 {
2488 struct nfs4_opendata *opendata;
2489 int ret;
2490
2491 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2492 NFS4_OPEN_CLAIM_FH);
2493 if (IS_ERR(opendata))
2494 return PTR_ERR(opendata);
2495 ret = nfs4_open_recover(opendata, state);
2496 if (ret == -ESTALE)
2497 d_drop(ctx->dentry);
2498 nfs4_opendata_put(opendata);
2499 return ret;
2500 }
2501
2502 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2503 {
2504 struct nfs_server *server = NFS_SERVER(state->inode);
2505 struct nfs4_exception exception = { };
2506 int err;
2507
2508 do {
2509 err = _nfs4_open_expired(ctx, state);
2510 trace_nfs4_open_expired(ctx, 0, err);
2511 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2512 continue;
2513 switch (err) {
2514 default:
2515 goto out;
2516 case -NFS4ERR_GRACE:
2517 case -NFS4ERR_DELAY:
2518 nfs4_handle_exception(server, err, &exception);
2519 err = 0;
2520 }
2521 } while (exception.retry);
2522 out:
2523 return err;
2524 }
2525
2526 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2527 {
2528 struct nfs_open_context *ctx;
2529 int ret;
2530
2531 ctx = nfs4_state_find_open_context(state);
2532 if (IS_ERR(ctx))
2533 return -EAGAIN;
2534 ret = nfs4_do_open_expired(ctx, state);
2535 put_nfs_open_context(ctx);
2536 return ret;
2537 }
2538
2539 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2540 const nfs4_stateid *stateid)
2541 {
2542 nfs_remove_bad_delegation(state->inode, stateid);
2543 write_seqlock(&state->seqlock);
2544 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2545 write_sequnlock(&state->seqlock);
2546 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2547 }
2548
2549 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2550 {
2551 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2552 nfs_finish_clear_delegation_stateid(state, NULL);
2553 }
2554
2555 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2556 {
2557 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2558 nfs40_clear_delegation_stateid(state);
2559 return nfs4_open_expired(sp, state);
2560 }
2561
2562 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2563 nfs4_stateid *stateid,
2564 struct rpc_cred *cred)
2565 {
2566 return -NFS4ERR_BAD_STATEID;
2567 }
2568
2569 #if defined(CONFIG_NFS_V4_1)
2570 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2571 nfs4_stateid *stateid,
2572 struct rpc_cred *cred)
2573 {
2574 int status;
2575
2576 switch (stateid->type) {
2577 default:
2578 break;
2579 case NFS4_INVALID_STATEID_TYPE:
2580 case NFS4_SPECIAL_STATEID_TYPE:
2581 return -NFS4ERR_BAD_STATEID;
2582 case NFS4_REVOKED_STATEID_TYPE:
2583 goto out_free;
2584 }
2585
2586 status = nfs41_test_stateid(server, stateid, cred);
2587 switch (status) {
2588 case -NFS4ERR_EXPIRED:
2589 case -NFS4ERR_ADMIN_REVOKED:
2590 case -NFS4ERR_DELEG_REVOKED:
2591 break;
2592 default:
2593 return status;
2594 }
2595 out_free:
2596 /* Ack the revoked state to the server */
2597 nfs41_free_stateid(server, stateid, cred, true);
2598 return -NFS4ERR_EXPIRED;
2599 }
2600
2601 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2602 {
2603 struct nfs_server *server = NFS_SERVER(state->inode);
2604 nfs4_stateid stateid;
2605 struct nfs_delegation *delegation;
2606 struct rpc_cred *cred;
2607 int status;
2608
2609 /* Get the delegation credential for use by test/free_stateid */
2610 rcu_read_lock();
2611 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2612 if (delegation == NULL) {
2613 rcu_read_unlock();
2614 return;
2615 }
2616
2617 nfs4_stateid_copy(&stateid, &delegation->stateid);
2618 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
2619 !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2620 &delegation->flags)) {
2621 rcu_read_unlock();
2622 nfs_finish_clear_delegation_stateid(state, &stateid);
2623 return;
2624 }
2625
2626 cred = get_rpccred(delegation->cred);
2627 rcu_read_unlock();
2628 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2629 trace_nfs4_test_delegation_stateid(state, NULL, status);
2630 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2631 nfs_finish_clear_delegation_stateid(state, &stateid);
2632
2633 put_rpccred(cred);
2634 }
2635
2636 /**
2637 * nfs41_check_expired_locks - possibly free a lock stateid
2638 *
2639 * @state: NFSv4 state for an inode
2640 *
2641 * Returns NFS_OK if recovery for this stateid is now finished.
2642 * Otherwise a negative NFS4ERR value is returned.
2643 */
2644 static int nfs41_check_expired_locks(struct nfs4_state *state)
2645 {
2646 int status, ret = NFS_OK;
2647 struct nfs4_lock_state *lsp, *prev = NULL;
2648 struct nfs_server *server = NFS_SERVER(state->inode);
2649
2650 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2651 goto out;
2652
2653 spin_lock(&state->state_lock);
2654 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2655 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2656 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
2657
2658 refcount_inc(&lsp->ls_count);
2659 spin_unlock(&state->state_lock);
2660
2661 nfs4_put_lock_state(prev);
2662 prev = lsp;
2663
2664 status = nfs41_test_and_free_expired_stateid(server,
2665 &lsp->ls_stateid,
2666 cred);
2667 trace_nfs4_test_lock_stateid(state, lsp, status);
2668 if (status == -NFS4ERR_EXPIRED ||
2669 status == -NFS4ERR_BAD_STATEID) {
2670 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2671 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2672 if (!recover_lost_locks)
2673 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2674 } else if (status != NFS_OK) {
2675 ret = status;
2676 nfs4_put_lock_state(prev);
2677 goto out;
2678 }
2679 spin_lock(&state->state_lock);
2680 }
2681 }
2682 spin_unlock(&state->state_lock);
2683 nfs4_put_lock_state(prev);
2684 out:
2685 return ret;
2686 }
2687
2688 /**
2689 * nfs41_check_open_stateid - possibly free an open stateid
2690 *
2691 * @state: NFSv4 state for an inode
2692 *
2693 * Returns NFS_OK if recovery for this stateid is now finished.
2694 * Otherwise a negative NFS4ERR value is returned.
2695 */
2696 static int nfs41_check_open_stateid(struct nfs4_state *state)
2697 {
2698 struct nfs_server *server = NFS_SERVER(state->inode);
2699 nfs4_stateid *stateid = &state->open_stateid;
2700 struct rpc_cred *cred = state->owner->so_cred;
2701 int status;
2702
2703 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
2704 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
2705 if (nfs4_have_delegation(state->inode, state->state))
2706 return NFS_OK;
2707 return -NFS4ERR_OPENMODE;
2708 }
2709 return -NFS4ERR_BAD_STATEID;
2710 }
2711 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2712 trace_nfs4_test_open_stateid(state, NULL, status);
2713 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2714 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2715 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2716 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2717 clear_bit(NFS_OPEN_STATE, &state->flags);
2718 stateid->type = NFS4_INVALID_STATEID_TYPE;
2719 return status;
2720 }
2721 if (nfs_open_stateid_recover_openmode(state))
2722 return -NFS4ERR_OPENMODE;
2723 return NFS_OK;
2724 }
2725
2726 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2727 {
2728 int status;
2729
2730 nfs41_check_delegation_stateid(state);
2731 status = nfs41_check_expired_locks(state);
2732 if (status != NFS_OK)
2733 return status;
2734 status = nfs41_check_open_stateid(state);
2735 if (status != NFS_OK)
2736 status = nfs4_open_expired(sp, state);
2737 return status;
2738 }
2739 #endif
2740
2741 /*
2742 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2743 * fields corresponding to attributes that were used to store the verifier.
2744 * Make sure we clobber those fields in the later setattr call
2745 */
2746 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2747 struct iattr *sattr, struct nfs4_label **label)
2748 {
2749 const u32 *attrset = opendata->o_res.attrset;
2750
2751 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2752 !(sattr->ia_valid & ATTR_ATIME_SET))
2753 sattr->ia_valid |= ATTR_ATIME;
2754
2755 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2756 !(sattr->ia_valid & ATTR_MTIME_SET))
2757 sattr->ia_valid |= ATTR_MTIME;
2758
2759 /* Except MODE, it seems harmless of setting twice. */
2760 if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
2761 (attrset[1] & FATTR4_WORD1_MODE ||
2762 attrset[2] & FATTR4_WORD2_MODE_UMASK))
2763 sattr->ia_valid &= ~ATTR_MODE;
2764
2765 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2766 *label = NULL;
2767 }
2768
2769 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2770 fmode_t fmode,
2771 int flags,
2772 struct nfs_open_context *ctx)
2773 {
2774 struct nfs4_state_owner *sp = opendata->owner;
2775 struct nfs_server *server = sp->so_server;
2776 struct dentry *dentry;
2777 struct nfs4_state *state;
2778 unsigned int seq;
2779 int ret;
2780
2781 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2782
2783 ret = _nfs4_proc_open(opendata);
2784 if (ret != 0)
2785 goto out;
2786
2787 state = nfs4_opendata_to_nfs4_state(opendata);
2788 ret = PTR_ERR(state);
2789 if (IS_ERR(state))
2790 goto out;
2791 ctx->state = state;
2792 if (server->caps & NFS_CAP_POSIX_LOCK)
2793 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2794 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
2795 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
2796
2797 dentry = opendata->dentry;
2798 if (d_really_is_negative(dentry)) {
2799 struct dentry *alias;
2800 d_drop(dentry);
2801 alias = d_exact_alias(dentry, state->inode);
2802 if (!alias)
2803 alias = d_splice_alias(igrab(state->inode), dentry);
2804 /* d_splice_alias() can't fail here - it's a non-directory */
2805 if (alias) {
2806 dput(ctx->dentry);
2807 ctx->dentry = dentry = alias;
2808 }
2809 nfs_set_verifier(dentry,
2810 nfs_save_change_attribute(d_inode(opendata->dir)));
2811 }
2812
2813 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2814 if (ret != 0)
2815 goto out;
2816
2817 if (d_inode(dentry) == state->inode) {
2818 nfs_inode_attach_open_context(ctx);
2819 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2820 nfs4_schedule_stateid_recovery(server, state);
2821 }
2822 out:
2823 return ret;
2824 }
2825
2826 /*
2827 * Returns a referenced nfs4_state
2828 */
2829 static int _nfs4_do_open(struct inode *dir,
2830 struct nfs_open_context *ctx,
2831 int flags,
2832 const struct nfs4_open_createattrs *c,
2833 int *opened)
2834 {
2835 struct nfs4_state_owner *sp;
2836 struct nfs4_state *state = NULL;
2837 struct nfs_server *server = NFS_SERVER(dir);
2838 struct nfs4_opendata *opendata;
2839 struct dentry *dentry = ctx->dentry;
2840 struct rpc_cred *cred = ctx->cred;
2841 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2842 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2843 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2844 struct iattr *sattr = c->sattr;
2845 struct nfs4_label *label = c->label;
2846 struct nfs4_label *olabel = NULL;
2847 int status;
2848
2849 /* Protect against reboot recovery conflicts */
2850 status = -ENOMEM;
2851 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2852 if (sp == NULL) {
2853 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2854 goto out_err;
2855 }
2856 status = nfs4_client_recover_expired_lease(server->nfs_client);
2857 if (status != 0)
2858 goto err_put_state_owner;
2859 if (d_really_is_positive(dentry))
2860 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2861 status = -ENOMEM;
2862 if (d_really_is_positive(dentry))
2863 claim = NFS4_OPEN_CLAIM_FH;
2864 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
2865 c, claim, GFP_KERNEL);
2866 if (opendata == NULL)
2867 goto err_put_state_owner;
2868
2869 if (label) {
2870 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2871 if (IS_ERR(olabel)) {
2872 status = PTR_ERR(olabel);
2873 goto err_opendata_put;
2874 }
2875 }
2876
2877 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2878 if (!opendata->f_attr.mdsthreshold) {
2879 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2880 if (!opendata->f_attr.mdsthreshold)
2881 goto err_free_label;
2882 }
2883 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2884 }
2885 if (d_really_is_positive(dentry))
2886 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2887
2888 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2889 if (status != 0)
2890 goto err_free_label;
2891 state = ctx->state;
2892
2893 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2894 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2895 nfs4_exclusive_attrset(opendata, sattr, &label);
2896 /*
2897 * send create attributes which was not set by open
2898 * with an extra setattr.
2899 */
2900 if (sattr->ia_valid & NFS4_VALID_ATTRS) {
2901 nfs_fattr_init(opendata->o_res.f_attr);
2902 status = nfs4_do_setattr(state->inode, cred,
2903 opendata->o_res.f_attr, sattr,
2904 ctx, label, olabel);
2905 if (status == 0) {
2906 nfs_setattr_update_inode(state->inode, sattr,
2907 opendata->o_res.f_attr);
2908 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2909 }
2910 }
2911 }
2912 if (opened && opendata->file_created)
2913 *opened |= FILE_CREATED;
2914
2915 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2916 *ctx_th = opendata->f_attr.mdsthreshold;
2917 opendata->f_attr.mdsthreshold = NULL;
2918 }
2919
2920 nfs4_label_free(olabel);
2921
2922 nfs4_opendata_put(opendata);
2923 nfs4_put_state_owner(sp);
2924 return 0;
2925 err_free_label:
2926 nfs4_label_free(olabel);
2927 err_opendata_put:
2928 nfs4_opendata_put(opendata);
2929 err_put_state_owner:
2930 nfs4_put_state_owner(sp);
2931 out_err:
2932 return status;
2933 }
2934
2935
2936 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2937 struct nfs_open_context *ctx,
2938 int flags,
2939 struct iattr *sattr,
2940 struct nfs4_label *label,
2941 int *opened)
2942 {
2943 struct nfs_server *server = NFS_SERVER(dir);
2944 struct nfs4_exception exception = { };
2945 struct nfs4_state *res;
2946 struct nfs4_open_createattrs c = {
2947 .label = label,
2948 .sattr = sattr,
2949 .verf = {
2950 [0] = (__u32)jiffies,
2951 [1] = (__u32)current->pid,
2952 },
2953 };
2954 int status;
2955
2956 do {
2957 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
2958 res = ctx->state;
2959 trace_nfs4_open_file(ctx, flags, status);
2960 if (status == 0)
2961 break;
2962 /* NOTE: BAD_SEQID means the server and client disagree about the
2963 * book-keeping w.r.t. state-changing operations
2964 * (OPEN/CLOSE/LOCK/LOCKU...)
2965 * It is actually a sign of a bug on the client or on the server.
2966 *
2967 * If we receive a BAD_SEQID error in the particular case of
2968 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2969 * have unhashed the old state_owner for us, and that we can
2970 * therefore safely retry using a new one. We should still warn
2971 * the user though...
2972 */
2973 if (status == -NFS4ERR_BAD_SEQID) {
2974 pr_warn_ratelimited("NFS: v4 server %s "
2975 " returned a bad sequence-id error!\n",
2976 NFS_SERVER(dir)->nfs_client->cl_hostname);
2977 exception.retry = 1;
2978 continue;
2979 }
2980 /*
2981 * BAD_STATEID on OPEN means that the server cancelled our
2982 * state before it received the OPEN_CONFIRM.
2983 * Recover by retrying the request as per the discussion
2984 * on Page 181 of RFC3530.
2985 */
2986 if (status == -NFS4ERR_BAD_STATEID) {
2987 exception.retry = 1;
2988 continue;
2989 }
2990 if (status == -EAGAIN) {
2991 /* We must have found a delegation */
2992 exception.retry = 1;
2993 continue;
2994 }
2995 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2996 continue;
2997 res = ERR_PTR(nfs4_handle_exception(server,
2998 status, &exception));
2999 } while (exception.retry);
3000 return res;
3001 }
3002
3003 static int _nfs4_do_setattr(struct inode *inode,
3004 struct nfs_setattrargs *arg,
3005 struct nfs_setattrres *res,
3006 struct rpc_cred *cred,
3007 struct nfs_open_context *ctx)
3008 {
3009 struct nfs_server *server = NFS_SERVER(inode);
3010 struct rpc_message msg = {
3011 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3012 .rpc_argp = arg,
3013 .rpc_resp = res,
3014 .rpc_cred = cred,
3015 };
3016 struct rpc_cred *delegation_cred = NULL;
3017 unsigned long timestamp = jiffies;
3018 fmode_t fmode;
3019 bool truncate;
3020 int status;
3021
3022 nfs_fattr_init(res->fattr);
3023
3024 /* Servers should only apply open mode checks for file size changes */
3025 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3026 fmode = truncate ? FMODE_WRITE : FMODE_READ;
3027
3028 if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
3029 /* Use that stateid */
3030 } else if (truncate && ctx != NULL) {
3031 struct nfs_lock_context *l_ctx;
3032 if (!nfs4_valid_open_stateid(ctx->state))
3033 return -EBADF;
3034 l_ctx = nfs_get_lock_context(ctx);
3035 if (IS_ERR(l_ctx))
3036 return PTR_ERR(l_ctx);
3037 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3038 &arg->stateid, &delegation_cred);
3039 nfs_put_lock_context(l_ctx);
3040 if (status == -EIO)
3041 return -EBADF;
3042 } else
3043 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3044 if (delegation_cred)
3045 msg.rpc_cred = delegation_cred;
3046
3047 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3048
3049 put_rpccred(delegation_cred);
3050 if (status == 0 && ctx != NULL)
3051 renew_lease(server, timestamp);
3052 trace_nfs4_setattr(inode, &arg->stateid, status);
3053 return status;
3054 }
3055
3056 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
3057 struct nfs_fattr *fattr, struct iattr *sattr,
3058 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3059 struct nfs4_label *olabel)
3060 {
3061 struct nfs_server *server = NFS_SERVER(inode);
3062 struct nfs4_state *state = ctx ? ctx->state : NULL;
3063 struct nfs_setattrargs arg = {
3064 .fh = NFS_FH(inode),
3065 .iap = sattr,
3066 .server = server,
3067 .bitmask = server->attr_bitmask,
3068 .label = ilabel,
3069 };
3070 struct nfs_setattrres res = {
3071 .fattr = fattr,
3072 .label = olabel,
3073 .server = server,
3074 };
3075 struct nfs4_exception exception = {
3076 .state = state,
3077 .inode = inode,
3078 .stateid = &arg.stateid,
3079 };
3080 int err;
3081
3082 arg.bitmask = nfs4_bitmask(server, ilabel);
3083 if (ilabel)
3084 arg.bitmask = nfs4_bitmask(server, olabel);
3085
3086 do {
3087 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3088 switch (err) {
3089 case -NFS4ERR_OPENMODE:
3090 if (!(sattr->ia_valid & ATTR_SIZE)) {
3091 pr_warn_once("NFSv4: server %s is incorrectly "
3092 "applying open mode checks to "
3093 "a SETATTR that is not "
3094 "changing file size.\n",
3095 server->nfs_client->cl_hostname);
3096 }
3097 if (state && !(state->state & FMODE_WRITE)) {
3098 err = -EBADF;
3099 if (sattr->ia_valid & ATTR_OPEN)
3100 err = -EACCES;
3101 goto out;
3102 }
3103 }
3104 err = nfs4_handle_exception(server, err, &exception);
3105 } while (exception.retry);
3106 out:
3107 return err;
3108 }
3109
3110 static bool
3111 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3112 {
3113 if (inode == NULL || !nfs_have_layout(inode))
3114 return false;
3115
3116 return pnfs_wait_on_layoutreturn(inode, task);
3117 }
3118
3119 struct nfs4_closedata {
3120 struct inode *inode;
3121 struct nfs4_state *state;
3122 struct nfs_closeargs arg;
3123 struct nfs_closeres res;
3124 struct {
3125 struct nfs4_layoutreturn_args arg;
3126 struct nfs4_layoutreturn_res res;
3127 struct nfs4_xdr_opaque_data ld_private;
3128 u32 roc_barrier;
3129 bool roc;
3130 } lr;
3131 struct nfs_fattr fattr;
3132 unsigned long timestamp;
3133 };
3134
3135 static void nfs4_free_closedata(void *data)
3136 {
3137 struct nfs4_closedata *calldata = data;
3138 struct nfs4_state_owner *sp = calldata->state->owner;
3139 struct super_block *sb = calldata->state->inode->i_sb;
3140
3141 if (calldata->lr.roc)
3142 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3143 calldata->res.lr_ret);
3144 nfs4_put_open_state(calldata->state);
3145 nfs_free_seqid(calldata->arg.seqid);
3146 nfs4_put_state_owner(sp);
3147 nfs_sb_deactive(sb);
3148 kfree(calldata);
3149 }
3150
3151 static void nfs4_close_done(struct rpc_task *task, void *data)
3152 {
3153 struct nfs4_closedata *calldata = data;
3154 struct nfs4_state *state = calldata->state;
3155 struct nfs_server *server = NFS_SERVER(calldata->inode);
3156 nfs4_stateid *res_stateid = NULL;
3157 struct nfs4_exception exception = {
3158 .state = state,
3159 .inode = calldata->inode,
3160 .stateid = &calldata->arg.stateid,
3161 };
3162
3163 dprintk("%s: begin!\n", __func__);
3164 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3165 return;
3166 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3167
3168 /* Handle Layoutreturn errors */
3169 if (calldata->arg.lr_args && task->tk_status != 0) {
3170 switch (calldata->res.lr_ret) {
3171 default:
3172 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3173 break;
3174 case 0:
3175 calldata->arg.lr_args = NULL;
3176 calldata->res.lr_res = NULL;
3177 break;
3178 case -NFS4ERR_OLD_STATEID:
3179 if (nfs4_refresh_layout_stateid(&calldata->arg.lr_args->stateid,
3180 calldata->inode))
3181 goto lr_restart;
3182 /* Fallthrough */
3183 case -NFS4ERR_ADMIN_REVOKED:
3184 case -NFS4ERR_DELEG_REVOKED:
3185 case -NFS4ERR_EXPIRED:
3186 case -NFS4ERR_BAD_STATEID:
3187 case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
3188 case -NFS4ERR_WRONG_CRED:
3189 calldata->arg.lr_args = NULL;
3190 calldata->res.lr_res = NULL;
3191 goto lr_restart;
3192 }
3193 }
3194
3195 /* hmm. we are done with the inode, and in the process of freeing
3196 * the state_owner. we keep this around to process errors
3197 */
3198 switch (task->tk_status) {
3199 case 0:
3200 res_stateid = &calldata->res.stateid;
3201 renew_lease(server, calldata->timestamp);
3202 break;
3203 case -NFS4ERR_ACCESS:
3204 if (calldata->arg.bitmask != NULL) {
3205 calldata->arg.bitmask = NULL;
3206 calldata->res.fattr = NULL;
3207 goto out_restart;
3208
3209 }
3210 break;
3211 case -NFS4ERR_OLD_STATEID:
3212 /* Did we race with OPEN? */
3213 if (nfs4_refresh_open_stateid(&calldata->arg.stateid,
3214 state))
3215 goto out_restart;
3216 goto out_release;
3217 case -NFS4ERR_ADMIN_REVOKED:
3218 case -NFS4ERR_STALE_STATEID:
3219 case -NFS4ERR_EXPIRED:
3220 nfs4_free_revoked_stateid(server,
3221 &calldata->arg.stateid,
3222 task->tk_msg.rpc_cred);
3223 /* Fallthrough */
3224 case -NFS4ERR_BAD_STATEID:
3225 break;
3226 default:
3227 task->tk_status = nfs4_async_handle_exception(task,
3228 server, task->tk_status, &exception);
3229 if (exception.retry)
3230 goto out_restart;
3231 }
3232 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3233 res_stateid, calldata->arg.fmode);
3234 out_release:
3235 task->tk_status = 0;
3236 nfs_release_seqid(calldata->arg.seqid);
3237 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3238 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3239 return;
3240 lr_restart:
3241 calldata->res.lr_ret = 0;
3242 out_restart:
3243 task->tk_status = 0;
3244 rpc_restart_call_prepare(task);
3245 goto out_release;
3246 }
3247
3248 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3249 {
3250 struct nfs4_closedata *calldata = data;
3251 struct nfs4_state *state = calldata->state;
3252 struct inode *inode = calldata->inode;
3253 bool is_rdonly, is_wronly, is_rdwr;
3254 int call_close = 0;
3255
3256 dprintk("%s: begin!\n", __func__);
3257 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3258 goto out_wait;
3259
3260 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3261 spin_lock(&state->owner->so_lock);
3262 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3263 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3264 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3265 /* Calculate the change in open mode */
3266 calldata->arg.fmode = 0;
3267 if (state->n_rdwr == 0) {
3268 if (state->n_rdonly == 0)
3269 call_close |= is_rdonly;
3270 else if (is_rdonly)
3271 calldata->arg.fmode |= FMODE_READ;
3272 if (state->n_wronly == 0)
3273 call_close |= is_wronly;
3274 else if (is_wronly)
3275 calldata->arg.fmode |= FMODE_WRITE;
3276 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3277 call_close |= is_rdwr;
3278 } else if (is_rdwr)
3279 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3280
3281 if (!nfs4_valid_open_stateid(state) ||
3282 !nfs4_refresh_open_stateid(&calldata->arg.stateid, state))
3283 call_close = 0;
3284 spin_unlock(&state->owner->so_lock);
3285
3286 if (!call_close) {
3287 /* Note: exit _without_ calling nfs4_close_done */
3288 goto out_no_action;
3289 }
3290
3291 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3292 nfs_release_seqid(calldata->arg.seqid);
3293 goto out_wait;
3294 }
3295
3296 if (calldata->arg.fmode == 0)
3297 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3298
3299 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3300 /* Close-to-open cache consistency revalidation */
3301 if (!nfs4_have_delegation(inode, FMODE_READ))
3302 calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3303 else
3304 calldata->arg.bitmask = NULL;
3305 }
3306
3307 calldata->arg.share_access =
3308 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3309 calldata->arg.fmode, 0);
3310
3311 if (calldata->res.fattr == NULL)
3312 calldata->arg.bitmask = NULL;
3313 else if (calldata->arg.bitmask == NULL)
3314 calldata->res.fattr = NULL;
3315 calldata->timestamp = jiffies;
3316 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3317 &calldata->arg.seq_args,
3318 &calldata->res.seq_res,
3319 task) != 0)
3320 nfs_release_seqid(calldata->arg.seqid);
3321 dprintk("%s: done!\n", __func__);
3322 return;
3323 out_no_action:
3324 task->tk_action = NULL;
3325 out_wait:
3326 nfs4_sequence_done(task, &calldata->res.seq_res);
3327 }
3328
3329 static const struct rpc_call_ops nfs4_close_ops = {
3330 .rpc_call_prepare = nfs4_close_prepare,
3331 .rpc_call_done = nfs4_close_done,
3332 .rpc_release = nfs4_free_closedata,
3333 };
3334
3335 /*
3336 * It is possible for data to be read/written from a mem-mapped file
3337 * after the sys_close call (which hits the vfs layer as a flush).
3338 * This means that we can't safely call nfsv4 close on a file until
3339 * the inode is cleared. This in turn means that we are not good
3340 * NFSv4 citizens - we do not indicate to the server to update the file's
3341 * share state even when we are done with one of the three share
3342 * stateid's in the inode.
3343 *
3344 * NOTE: Caller must be holding the sp->so_owner semaphore!
3345 */
3346 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3347 {
3348 struct nfs_server *server = NFS_SERVER(state->inode);
3349 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3350 struct nfs4_closedata *calldata;
3351 struct nfs4_state_owner *sp = state->owner;
3352 struct rpc_task *task;
3353 struct rpc_message msg = {
3354 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3355 .rpc_cred = state->owner->so_cred,
3356 };
3357 struct rpc_task_setup task_setup_data = {
3358 .rpc_client = server->client,
3359 .rpc_message = &msg,
3360 .callback_ops = &nfs4_close_ops,
3361 .workqueue = nfsiod_workqueue,
3362 .flags = RPC_TASK_ASYNC,
3363 };
3364 int status = -ENOMEM;
3365
3366 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3367 &task_setup_data.rpc_client, &msg);
3368
3369 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3370 if (calldata == NULL)
3371 goto out;
3372 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
3373 calldata->inode = state->inode;
3374 calldata->state = state;
3375 calldata->arg.fh = NFS_FH(state->inode);
3376 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3377 goto out_free_calldata;
3378 /* Serialization for the sequence id */
3379 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3380 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3381 if (IS_ERR(calldata->arg.seqid))
3382 goto out_free_calldata;
3383 nfs_fattr_init(&calldata->fattr);
3384 calldata->arg.fmode = 0;
3385 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3386 calldata->res.fattr = &calldata->fattr;
3387 calldata->res.seqid = calldata->arg.seqid;
3388 calldata->res.server = server;
3389 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3390 calldata->lr.roc = pnfs_roc(state->inode,
3391 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3392 if (calldata->lr.roc) {
3393 calldata->arg.lr_args = &calldata->lr.arg;
3394 calldata->res.lr_res = &calldata->lr.res;
3395 }
3396 nfs_sb_active(calldata->inode->i_sb);
3397
3398 msg.rpc_argp = &calldata->arg;
3399 msg.rpc_resp = &calldata->res;
3400 task_setup_data.callback_data = calldata;
3401 task = rpc_run_task(&task_setup_data);
3402 if (IS_ERR(task))
3403 return PTR_ERR(task);
3404 status = 0;
3405 if (wait)
3406 status = rpc_wait_for_completion_task(task);
3407 rpc_put_task(task);
3408 return status;
3409 out_free_calldata:
3410 kfree(calldata);
3411 out:
3412 nfs4_put_open_state(state);
3413 nfs4_put_state_owner(sp);
3414 return status;
3415 }
3416
3417 static struct inode *
3418 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3419 int open_flags, struct iattr *attr, int *opened)
3420 {
3421 struct nfs4_state *state;
3422 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3423
3424 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3425
3426 /* Protect against concurrent sillydeletes */
3427 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3428
3429 nfs4_label_release_security(label);
3430
3431 if (IS_ERR(state))
3432 return ERR_CAST(state);
3433 return state->inode;
3434 }
3435
3436 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3437 {
3438 if (ctx->state == NULL)
3439 return;
3440 if (is_sync)
3441 nfs4_close_sync(ctx->state, ctx->mode);
3442 else
3443 nfs4_close_state(ctx->state, ctx->mode);
3444 }
3445
3446 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3447 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3448 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL)
3449
3450 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3451 {
3452 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3453 struct nfs4_server_caps_arg args = {
3454 .fhandle = fhandle,
3455 .bitmask = bitmask,
3456 };
3457 struct nfs4_server_caps_res res = {};
3458 struct rpc_message msg = {
3459 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3460 .rpc_argp = &args,
3461 .rpc_resp = &res,
3462 };
3463 int status;
3464 int i;
3465
3466 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3467 FATTR4_WORD0_FH_EXPIRE_TYPE |
3468 FATTR4_WORD0_LINK_SUPPORT |
3469 FATTR4_WORD0_SYMLINK_SUPPORT |
3470 FATTR4_WORD0_ACLSUPPORT;
3471 if (minorversion)
3472 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3473
3474 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3475 if (status == 0) {
3476 /* Sanity check the server answers */
3477 switch (minorversion) {
3478 case 0:
3479 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3480 res.attr_bitmask[2] = 0;
3481 break;
3482 case 1:
3483 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3484 break;
3485 case 2:
3486 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3487 }
3488 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3489 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3490 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3491 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3492 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3493 NFS_CAP_CTIME|NFS_CAP_MTIME|
3494 NFS_CAP_SECURITY_LABEL);
3495 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3496 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3497 server->caps |= NFS_CAP_ACLS;
3498 if (res.has_links != 0)
3499 server->caps |= NFS_CAP_HARDLINKS;
3500 if (res.has_symlinks != 0)
3501 server->caps |= NFS_CAP_SYMLINKS;
3502 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3503 server->caps |= NFS_CAP_FILEID;
3504 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3505 server->caps |= NFS_CAP_MODE;
3506 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3507 server->caps |= NFS_CAP_NLINK;
3508 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3509 server->caps |= NFS_CAP_OWNER;
3510 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3511 server->caps |= NFS_CAP_OWNER_GROUP;
3512 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3513 server->caps |= NFS_CAP_ATIME;
3514 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3515 server->caps |= NFS_CAP_CTIME;
3516 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3517 server->caps |= NFS_CAP_MTIME;
3518 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3519 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3520 server->caps |= NFS_CAP_SECURITY_LABEL;
3521 #endif
3522 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3523 sizeof(server->attr_bitmask));
3524 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3525
3526 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3527 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3528 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3529 server->cache_consistency_bitmask[2] = 0;
3530
3531 /* Avoid a regression due to buggy server */
3532 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3533 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3534 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3535 sizeof(server->exclcreat_bitmask));
3536
3537 server->acl_bitmask = res.acl_bitmask;
3538 server->fh_expire_type = res.fh_expire_type;
3539 }
3540
3541 return status;
3542 }
3543
3544 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3545 {
3546 struct nfs4_exception exception = { };
3547 int err;
3548 do {
3549 err = nfs4_handle_exception(server,
3550 _nfs4_server_capabilities(server, fhandle),
3551 &exception);
3552 } while (exception.retry);
3553 return err;
3554 }
3555
3556 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3557 struct nfs_fsinfo *info)
3558 {
3559 u32 bitmask[3];
3560 struct nfs4_lookup_root_arg args = {
3561 .bitmask = bitmask,
3562 };
3563 struct nfs4_lookup_res res = {
3564 .server = server,
3565 .fattr = info->fattr,
3566 .fh = fhandle,
3567 };
3568 struct rpc_message msg = {
3569 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3570 .rpc_argp = &args,
3571 .rpc_resp = &res,
3572 };
3573
3574 bitmask[0] = nfs4_fattr_bitmap[0];
3575 bitmask[1] = nfs4_fattr_bitmap[1];
3576 /*
3577 * Process the label in the upcoming getfattr
3578 */
3579 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3580
3581 nfs_fattr_init(info->fattr);
3582 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3583 }
3584
3585 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3586 struct nfs_fsinfo *info)
3587 {
3588 struct nfs4_exception exception = { };
3589 int err;
3590 do {
3591 err = _nfs4_lookup_root(server, fhandle, info);
3592 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3593 switch (err) {
3594 case 0:
3595 case -NFS4ERR_WRONGSEC:
3596 goto out;
3597 default:
3598 err = nfs4_handle_exception(server, err, &exception);
3599 }
3600 } while (exception.retry);
3601 out:
3602 return err;
3603 }
3604
3605 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3606 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3607 {
3608 struct rpc_auth_create_args auth_args = {
3609 .pseudoflavor = flavor,
3610 };
3611 struct rpc_auth *auth;
3612
3613 auth = rpcauth_create(&auth_args, server->client);
3614 if (IS_ERR(auth))
3615 return -EACCES;
3616 return nfs4_lookup_root(server, fhandle, info);
3617 }
3618
3619 /*
3620 * Retry pseudoroot lookup with various security flavors. We do this when:
3621 *
3622 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3623 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3624 *
3625 * Returns zero on success, or a negative NFS4ERR value, or a
3626 * negative errno value.
3627 */
3628 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3629 struct nfs_fsinfo *info)
3630 {
3631 /* Per 3530bis 15.33.5 */
3632 static const rpc_authflavor_t flav_array[] = {
3633 RPC_AUTH_GSS_KRB5P,
3634 RPC_AUTH_GSS_KRB5I,
3635 RPC_AUTH_GSS_KRB5,
3636 RPC_AUTH_UNIX, /* courtesy */
3637 RPC_AUTH_NULL,
3638 };
3639 int status = -EPERM;
3640 size_t i;
3641
3642 if (server->auth_info.flavor_len > 0) {
3643 /* try each flavor specified by user */
3644 for (i = 0; i < server->auth_info.flavor_len; i++) {
3645 status = nfs4_lookup_root_sec(server, fhandle, info,
3646 server->auth_info.flavors[i]);
3647 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3648 continue;
3649 break;
3650 }
3651 } else {
3652 /* no flavors specified by user, try default list */
3653 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3654 status = nfs4_lookup_root_sec(server, fhandle, info,
3655 flav_array[i]);
3656 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3657 continue;
3658 break;
3659 }
3660 }
3661
3662 /*
3663 * -EACCESS could mean that the user doesn't have correct permissions
3664 * to access the mount. It could also mean that we tried to mount
3665 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3666 * existing mount programs don't handle -EACCES very well so it should
3667 * be mapped to -EPERM instead.
3668 */
3669 if (status == -EACCES)
3670 status = -EPERM;
3671 return status;
3672 }
3673
3674 /**
3675 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3676 * @server: initialized nfs_server handle
3677 * @fhandle: we fill in the pseudo-fs root file handle
3678 * @info: we fill in an FSINFO struct
3679 * @auth_probe: probe the auth flavours
3680 *
3681 * Returns zero on success, or a negative errno.
3682 */
3683 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3684 struct nfs_fsinfo *info,
3685 bool auth_probe)
3686 {
3687 int status = 0;
3688
3689 if (!auth_probe)
3690 status = nfs4_lookup_root(server, fhandle, info);
3691
3692 if (auth_probe || status == NFS4ERR_WRONGSEC)
3693 status = server->nfs_client->cl_mvops->find_root_sec(server,
3694 fhandle, info);
3695
3696 if (status == 0)
3697 status = nfs4_server_capabilities(server, fhandle);
3698 if (status == 0)
3699 status = nfs4_do_fsinfo(server, fhandle, info);
3700
3701 return nfs4_map_errors(status);
3702 }
3703
3704 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3705 struct nfs_fsinfo *info)
3706 {
3707 int error;
3708 struct nfs_fattr *fattr = info->fattr;
3709 struct nfs4_label *label = NULL;
3710
3711 error = nfs4_server_capabilities(server, mntfh);
3712 if (error < 0) {
3713 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3714 return error;
3715 }
3716
3717 label = nfs4_label_alloc(server, GFP_KERNEL);
3718 if (IS_ERR(label))
3719 return PTR_ERR(label);
3720
3721 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3722 if (error < 0) {
3723 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3724 goto err_free_label;
3725 }
3726
3727 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3728 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3729 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3730
3731 err_free_label:
3732 nfs4_label_free(label);
3733
3734 return error;
3735 }
3736
3737 /*
3738 * Get locations and (maybe) other attributes of a referral.
3739 * Note that we'll actually follow the referral later when
3740 * we detect fsid mismatch in inode revalidation
3741 */
3742 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3743 const struct qstr *name, struct nfs_fattr *fattr,
3744 struct nfs_fh *fhandle)
3745 {
3746 int status = -ENOMEM;
3747 struct page *page = NULL;
3748 struct nfs4_fs_locations *locations = NULL;
3749
3750 page = alloc_page(GFP_KERNEL);
3751 if (page == NULL)
3752 goto out;
3753 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3754 if (locations == NULL)
3755 goto out;
3756
3757 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3758 if (status != 0)
3759 goto out;
3760
3761 /*
3762 * If the fsid didn't change, this is a migration event, not a
3763 * referral. Cause us to drop into the exception handler, which
3764 * will kick off migration recovery.
3765 */
3766 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3767 dprintk("%s: server did not return a different fsid for"
3768 " a referral at %s\n", __func__, name->name);
3769 status = -NFS4ERR_MOVED;
3770 goto out;
3771 }
3772 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3773 nfs_fixup_referral_attributes(&locations->fattr);
3774
3775 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3776 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3777 memset(fhandle, 0, sizeof(struct nfs_fh));
3778 out:
3779 if (page)
3780 __free_page(page);
3781 kfree(locations);
3782 return status;
3783 }
3784
3785 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3786 struct nfs_fattr *fattr, struct nfs4_label *label)
3787 {
3788 struct nfs4_getattr_arg args = {
3789 .fh = fhandle,
3790 .bitmask = server->attr_bitmask,
3791 };
3792 struct nfs4_getattr_res res = {
3793 .fattr = fattr,
3794 .label = label,
3795 .server = server,
3796 };
3797 struct rpc_message msg = {
3798 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3799 .rpc_argp = &args,
3800 .rpc_resp = &res,
3801 };
3802
3803 args.bitmask = nfs4_bitmask(server, label);
3804
3805 nfs_fattr_init(fattr);
3806 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3807 }
3808
3809 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3810 struct nfs_fattr *fattr, struct nfs4_label *label)
3811 {
3812 struct nfs4_exception exception = { };
3813 int err;
3814 do {
3815 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3816 trace_nfs4_getattr(server, fhandle, fattr, err);
3817 err = nfs4_handle_exception(server, err,
3818 &exception);
3819 } while (exception.retry);
3820 return err;
3821 }
3822
3823 /*
3824 * The file is not closed if it is opened due to the a request to change
3825 * the size of the file. The open call will not be needed once the
3826 * VFS layer lookup-intents are implemented.
3827 *
3828 * Close is called when the inode is destroyed.
3829 * If we haven't opened the file for O_WRONLY, we
3830 * need to in the size_change case to obtain a stateid.
3831 *
3832 * Got race?
3833 * Because OPEN is always done by name in nfsv4, it is
3834 * possible that we opened a different file by the same
3835 * name. We can recognize this race condition, but we
3836 * can't do anything about it besides returning an error.
3837 *
3838 * This will be fixed with VFS changes (lookup-intent).
3839 */
3840 static int
3841 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3842 struct iattr *sattr)
3843 {
3844 struct inode *inode = d_inode(dentry);
3845 struct rpc_cred *cred = NULL;
3846 struct nfs_open_context *ctx = NULL;
3847 struct nfs4_label *label = NULL;
3848 int status;
3849
3850 if (pnfs_ld_layoutret_on_setattr(inode) &&
3851 sattr->ia_valid & ATTR_SIZE &&
3852 sattr->ia_size < i_size_read(inode))
3853 pnfs_commit_and_return_layout(inode);
3854
3855 nfs_fattr_init(fattr);
3856
3857 /* Deal with open(O_TRUNC) */
3858 if (sattr->ia_valid & ATTR_OPEN)
3859 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3860
3861 /* Optimization: if the end result is no change, don't RPC */
3862 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3863 return 0;
3864
3865 /* Search for an existing open(O_WRITE) file */
3866 if (sattr->ia_valid & ATTR_FILE) {
3867
3868 ctx = nfs_file_open_context(sattr->ia_file);
3869 if (ctx)
3870 cred = ctx->cred;
3871 }
3872
3873 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3874 if (IS_ERR(label))
3875 return PTR_ERR(label);
3876
3877 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
3878 if (status == 0) {
3879 nfs_setattr_update_inode(inode, sattr, fattr);
3880 nfs_setsecurity(inode, fattr, label);
3881 }
3882 nfs4_label_free(label);
3883 return status;
3884 }
3885
3886 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3887 const struct qstr *name, struct nfs_fh *fhandle,
3888 struct nfs_fattr *fattr, struct nfs4_label *label)
3889 {
3890 struct nfs_server *server = NFS_SERVER(dir);
3891 int status;
3892 struct nfs4_lookup_arg args = {
3893 .bitmask = server->attr_bitmask,
3894 .dir_fh = NFS_FH(dir),
3895 .name = name,
3896 };
3897 struct nfs4_lookup_res res = {
3898 .server = server,
3899 .fattr = fattr,
3900 .label = label,
3901 .fh = fhandle,
3902 };
3903 struct rpc_message msg = {
3904 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3905 .rpc_argp = &args,
3906 .rpc_resp = &res,
3907 };
3908
3909 args.bitmask = nfs4_bitmask(server, label);
3910
3911 nfs_fattr_init(fattr);
3912
3913 dprintk("NFS call lookup %s\n", name->name);
3914 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3915 dprintk("NFS reply lookup: %d\n", status);
3916 return status;
3917 }
3918
3919 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3920 {
3921 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3922 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3923 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3924 fattr->nlink = 2;
3925 }
3926
3927 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3928 const struct qstr *name, struct nfs_fh *fhandle,
3929 struct nfs_fattr *fattr, struct nfs4_label *label)
3930 {
3931 struct nfs4_exception exception = { };
3932 struct rpc_clnt *client = *clnt;
3933 int err;
3934 do {
3935 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3936 trace_nfs4_lookup(dir, name, err);
3937 switch (err) {
3938 case -NFS4ERR_BADNAME:
3939 err = -ENOENT;
3940 goto out;
3941 case -NFS4ERR_MOVED:
3942 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3943 if (err == -NFS4ERR_MOVED)
3944 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3945 goto out;
3946 case -NFS4ERR_WRONGSEC:
3947 err = -EPERM;
3948 if (client != *clnt)
3949 goto out;
3950 client = nfs4_negotiate_security(client, dir, name);
3951 if (IS_ERR(client))
3952 return PTR_ERR(client);
3953
3954 exception.retry = 1;
3955 break;
3956 default:
3957 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3958 }
3959 } while (exception.retry);
3960
3961 out:
3962 if (err == 0)
3963 *clnt = client;
3964 else if (client != *clnt)
3965 rpc_shutdown_client(client);
3966
3967 return err;
3968 }
3969
3970 static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
3971 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3972 struct nfs4_label *label)
3973 {
3974 int status;
3975 struct rpc_clnt *client = NFS_CLIENT(dir);
3976
3977 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3978 if (client != NFS_CLIENT(dir)) {
3979 rpc_shutdown_client(client);
3980 nfs_fixup_secinfo_attributes(fattr);
3981 }
3982 return status;
3983 }
3984
3985 struct rpc_clnt *
3986 nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
3987 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3988 {
3989 struct rpc_clnt *client = NFS_CLIENT(dir);
3990 int status;
3991
3992 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3993 if (status < 0)
3994 return ERR_PTR(status);
3995 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3996 }
3997
3998 static int _nfs4_proc_lookupp(struct inode *inode,
3999 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4000 struct nfs4_label *label)
4001 {
4002 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4003 struct nfs_server *server = NFS_SERVER(inode);
4004 int status;
4005 struct nfs4_lookupp_arg args = {
4006 .bitmask = server->attr_bitmask,
4007 .fh = NFS_FH(inode),
4008 };
4009 struct nfs4_lookupp_res res = {
4010 .server = server,
4011 .fattr = fattr,
4012 .label = label,
4013 .fh = fhandle,
4014 };
4015 struct rpc_message msg = {
4016 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4017 .rpc_argp = &args,
4018 .rpc_resp = &res,
4019 };
4020
4021 args.bitmask = nfs4_bitmask(server, label);
4022
4023 nfs_fattr_init(fattr);
4024
4025 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4026 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4027 &res.seq_res, 0);
4028 dprintk("NFS reply lookupp: %d\n", status);
4029 return status;
4030 }
4031
4032 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4033 struct nfs_fattr *fattr, struct nfs4_label *label)
4034 {
4035 struct nfs4_exception exception = { };
4036 int err;
4037 do {
4038 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4039 trace_nfs4_lookupp(inode, err);
4040 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4041 &exception);
4042 } while (exception.retry);
4043 return err;
4044 }
4045
4046 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4047 {
4048 struct nfs_server *server = NFS_SERVER(inode);
4049 struct nfs4_accessargs args = {
4050 .fh = NFS_FH(inode),
4051 .bitmask = server->cache_consistency_bitmask,
4052 .access = entry->mask,
4053 };
4054 struct nfs4_accessres res = {
4055 .server = server,
4056 };
4057 struct rpc_message msg = {
4058 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4059 .rpc_argp = &args,
4060 .rpc_resp = &res,
4061 .rpc_cred = entry->cred,
4062 };
4063 int status = 0;
4064
4065 res.fattr = nfs_alloc_fattr();
4066 if (res.fattr == NULL)
4067 return -ENOMEM;
4068
4069 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4070 if (!status) {
4071 nfs_access_set_mask(entry, res.access);
4072 nfs_refresh_inode(inode, res.fattr);
4073 }
4074 nfs_free_fattr(res.fattr);
4075 return status;
4076 }
4077
4078 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4079 {
4080 struct nfs4_exception exception = { };
4081 int err;
4082 do {
4083 err = _nfs4_proc_access(inode, entry);
4084 trace_nfs4_access(inode, err);
4085 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4086 &exception);
4087 } while (exception.retry);
4088 return err;
4089 }
4090
4091 /*
4092 * TODO: For the time being, we don't try to get any attributes
4093 * along with any of the zero-copy operations READ, READDIR,
4094 * READLINK, WRITE.
4095 *
4096 * In the case of the first three, we want to put the GETATTR
4097 * after the read-type operation -- this is because it is hard
4098 * to predict the length of a GETATTR response in v4, and thus
4099 * align the READ data correctly. This means that the GETATTR
4100 * may end up partially falling into the page cache, and we should
4101 * shift it into the 'tail' of the xdr_buf before processing.
4102 * To do this efficiently, we need to know the total length
4103 * of data received, which doesn't seem to be available outside
4104 * of the RPC layer.
4105 *
4106 * In the case of WRITE, we also want to put the GETATTR after
4107 * the operation -- in this case because we want to make sure
4108 * we get the post-operation mtime and size.
4109 *
4110 * Both of these changes to the XDR layer would in fact be quite
4111 * minor, but I decided to leave them for a subsequent patch.
4112 */
4113 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4114 unsigned int pgbase, unsigned int pglen)
4115 {
4116 struct nfs4_readlink args = {
4117 .fh = NFS_FH(inode),
4118 .pgbase = pgbase,
4119 .pglen = pglen,
4120 .pages = &page,
4121 };
4122 struct nfs4_readlink_res res;
4123 struct rpc_message msg = {
4124 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4125 .rpc_argp = &args,
4126 .rpc_resp = &res,
4127 };
4128
4129 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4130 }
4131
4132 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4133 unsigned int pgbase, unsigned int pglen)
4134 {
4135 struct nfs4_exception exception = { };
4136 int err;
4137 do {
4138 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4139 trace_nfs4_readlink(inode, err);
4140 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4141 &exception);
4142 } while (exception.retry);
4143 return err;
4144 }
4145
4146 /*
4147 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4148 */
4149 static int
4150 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4151 int flags)
4152 {
4153 struct nfs_server *server = NFS_SERVER(dir);
4154 struct nfs4_label l, *ilabel = NULL;
4155 struct nfs_open_context *ctx;
4156 struct nfs4_state *state;
4157 int status = 0;
4158
4159 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4160 if (IS_ERR(ctx))
4161 return PTR_ERR(ctx);
4162
4163 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4164
4165 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4166 sattr->ia_mode &= ~current_umask();
4167 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4168 if (IS_ERR(state)) {
4169 status = PTR_ERR(state);
4170 goto out;
4171 }
4172 out:
4173 nfs4_label_release_security(ilabel);
4174 put_nfs_open_context(ctx);
4175 return status;
4176 }
4177
4178 static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
4179 {
4180 struct nfs_server *server = NFS_SERVER(dir);
4181 struct nfs_removeargs args = {
4182 .fh = NFS_FH(dir),
4183 .name = *name,
4184 };
4185 struct nfs_removeres res = {
4186 .server = server,
4187 };
4188 struct rpc_message msg = {
4189 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4190 .rpc_argp = &args,
4191 .rpc_resp = &res,
4192 };
4193 unsigned long timestamp = jiffies;
4194 int status;
4195
4196 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4197 if (status == 0)
4198 update_changeattr(dir, &res.cinfo, timestamp);
4199 return status;
4200 }
4201
4202 static int nfs4_proc_remove(struct inode *dir, const struct qstr *name)
4203 {
4204 struct nfs4_exception exception = { };
4205 int err;
4206 do {
4207 err = _nfs4_proc_remove(dir, name);
4208 trace_nfs4_remove(dir, name, err);
4209 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4210 &exception);
4211 } while (exception.retry);
4212 return err;
4213 }
4214
4215 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
4216 {
4217 struct nfs_server *server = NFS_SERVER(dir);
4218 struct nfs_removeargs *args = msg->rpc_argp;
4219 struct nfs_removeres *res = msg->rpc_resp;
4220
4221 res->server = server;
4222 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4223 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
4224
4225 nfs_fattr_init(res->dir_attr);
4226 }
4227
4228 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4229 {
4230 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4231 &data->args.seq_args,
4232 &data->res.seq_res,
4233 task);
4234 }
4235
4236 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4237 {
4238 struct nfs_unlinkdata *data = task->tk_calldata;
4239 struct nfs_removeres *res = &data->res;
4240
4241 if (!nfs4_sequence_done(task, &res->seq_res))
4242 return 0;
4243 if (nfs4_async_handle_error(task, res->server, NULL,
4244 &data->timeout) == -EAGAIN)
4245 return 0;
4246 if (task->tk_status == 0)
4247 update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
4248 return 1;
4249 }
4250
4251 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
4252 {
4253 struct nfs_server *server = NFS_SERVER(dir);
4254 struct nfs_renameargs *arg = msg->rpc_argp;
4255 struct nfs_renameres *res = msg->rpc_resp;
4256
4257 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4258 res->server = server;
4259 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
4260 }
4261
4262 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4263 {
4264 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4265 &data->args.seq_args,
4266 &data->res.seq_res,
4267 task);
4268 }
4269
4270 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4271 struct inode *new_dir)
4272 {
4273 struct nfs_renamedata *data = task->tk_calldata;
4274 struct nfs_renameres *res = &data->res;
4275
4276 if (!nfs4_sequence_done(task, &res->seq_res))
4277 return 0;
4278 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4279 return 0;
4280
4281 if (task->tk_status == 0) {
4282 update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
4283 if (new_dir != old_dir)
4284 update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
4285 }
4286 return 1;
4287 }
4288
4289 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4290 {
4291 struct nfs_server *server = NFS_SERVER(inode);
4292 struct nfs4_link_arg arg = {
4293 .fh = NFS_FH(inode),
4294 .dir_fh = NFS_FH(dir),
4295 .name = name,
4296 .bitmask = server->attr_bitmask,
4297 };
4298 struct nfs4_link_res res = {
4299 .server = server,
4300 .label = NULL,
4301 };
4302 struct rpc_message msg = {
4303 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4304 .rpc_argp = &arg,
4305 .rpc_resp = &res,
4306 };
4307 int status = -ENOMEM;
4308
4309 res.fattr = nfs_alloc_fattr();
4310 if (res.fattr == NULL)
4311 goto out;
4312
4313 res.label = nfs4_label_alloc(server, GFP_KERNEL);
4314 if (IS_ERR(res.label)) {
4315 status = PTR_ERR(res.label);
4316 goto out;
4317 }
4318 arg.bitmask = nfs4_bitmask(server, res.label);
4319
4320 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4321 if (!status) {
4322 update_changeattr(dir, &res.cinfo, res.fattr->time_start);
4323 status = nfs_post_op_update_inode(inode, res.fattr);
4324 if (!status)
4325 nfs_setsecurity(inode, res.fattr, res.label);
4326 }
4327
4328
4329 nfs4_label_free(res.label);
4330
4331 out:
4332 nfs_free_fattr(res.fattr);
4333 return status;
4334 }
4335
4336 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4337 {
4338 struct nfs4_exception exception = { };
4339 int err;
4340 do {
4341 err = nfs4_handle_exception(NFS_SERVER(inode),
4342 _nfs4_proc_link(inode, dir, name),
4343 &exception);
4344 } while (exception.retry);
4345 return err;
4346 }
4347
4348 struct nfs4_createdata {
4349 struct rpc_message msg;
4350 struct nfs4_create_arg arg;
4351 struct nfs4_create_res res;
4352 struct nfs_fh fh;
4353 struct nfs_fattr fattr;
4354 struct nfs4_label *label;
4355 };
4356
4357 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4358 const struct qstr *name, struct iattr *sattr, u32 ftype)
4359 {
4360 struct nfs4_createdata *data;
4361
4362 data = kzalloc(sizeof(*data), GFP_KERNEL);
4363 if (data != NULL) {
4364 struct nfs_server *server = NFS_SERVER(dir);
4365
4366 data->label = nfs4_label_alloc(server, GFP_KERNEL);
4367 if (IS_ERR(data->label))
4368 goto out_free;
4369
4370 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4371 data->msg.rpc_argp = &data->arg;
4372 data->msg.rpc_resp = &data->res;
4373 data->arg.dir_fh = NFS_FH(dir);
4374 data->arg.server = server;
4375 data->arg.name = name;
4376 data->arg.attrs = sattr;
4377 data->arg.ftype = ftype;
4378 data->arg.bitmask = nfs4_bitmask(server, data->label);
4379 data->arg.umask = current_umask();
4380 data->res.server = server;
4381 data->res.fh = &data->fh;
4382 data->res.fattr = &data->fattr;
4383 data->res.label = data->label;
4384 nfs_fattr_init(data->res.fattr);
4385 }
4386 return data;
4387 out_free:
4388 kfree(data);
4389 return NULL;
4390 }
4391
4392 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4393 {
4394 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4395 &data->arg.seq_args, &data->res.seq_res, 1);
4396 if (status == 0) {
4397 update_changeattr(dir, &data->res.dir_cinfo,
4398 data->res.fattr->time_start);
4399 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4400 }
4401 return status;
4402 }
4403
4404 static void nfs4_free_createdata(struct nfs4_createdata *data)
4405 {
4406 nfs4_label_free(data->label);
4407 kfree(data);
4408 }
4409
4410 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4411 struct page *page, unsigned int len, struct iattr *sattr,
4412 struct nfs4_label *label)
4413 {
4414 struct nfs4_createdata *data;
4415 int status = -ENAMETOOLONG;
4416
4417 if (len > NFS4_MAXPATHLEN)
4418 goto out;
4419
4420 status = -ENOMEM;
4421 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4422 if (data == NULL)
4423 goto out;
4424
4425 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4426 data->arg.u.symlink.pages = &page;
4427 data->arg.u.symlink.len = len;
4428 data->arg.label = label;
4429
4430 status = nfs4_do_create(dir, dentry, data);
4431
4432 nfs4_free_createdata(data);
4433 out:
4434 return status;
4435 }
4436
4437 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4438 struct page *page, unsigned int len, struct iattr *sattr)
4439 {
4440 struct nfs4_exception exception = { };
4441 struct nfs4_label l, *label = NULL;
4442 int err;
4443
4444 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4445
4446 do {
4447 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
4448 trace_nfs4_symlink(dir, &dentry->d_name, err);
4449 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4450 &exception);
4451 } while (exception.retry);
4452
4453 nfs4_label_release_security(label);
4454 return err;
4455 }
4456
4457 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4458 struct iattr *sattr, struct nfs4_label *label)
4459 {
4460 struct nfs4_createdata *data;
4461 int status = -ENOMEM;
4462
4463 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4464 if (data == NULL)
4465 goto out;
4466
4467 data->arg.label = label;
4468 status = nfs4_do_create(dir, dentry, data);
4469
4470 nfs4_free_createdata(data);
4471 out:
4472 return status;
4473 }
4474
4475 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4476 struct iattr *sattr)
4477 {
4478 struct nfs_server *server = NFS_SERVER(dir);
4479 struct nfs4_exception exception = { };
4480 struct nfs4_label l, *label = NULL;
4481 int err;
4482
4483 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4484
4485 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4486 sattr->ia_mode &= ~current_umask();
4487 do {
4488 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4489 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4490 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4491 &exception);
4492 } while (exception.retry);
4493 nfs4_label_release_security(label);
4494
4495 return err;
4496 }
4497
4498 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4499 u64 cookie, struct page **pages, unsigned int count, bool plus)
4500 {
4501 struct inode *dir = d_inode(dentry);
4502 struct nfs4_readdir_arg args = {
4503 .fh = NFS_FH(dir),
4504 .pages = pages,
4505 .pgbase = 0,
4506 .count = count,
4507 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4508 .plus = plus,
4509 };
4510 struct nfs4_readdir_res res;
4511 struct rpc_message msg = {
4512 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4513 .rpc_argp = &args,
4514 .rpc_resp = &res,
4515 .rpc_cred = cred,
4516 };
4517 int status;
4518
4519 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4520 dentry,
4521 (unsigned long long)cookie);
4522 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4523 res.pgbase = args.pgbase;
4524 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4525 if (status >= 0) {
4526 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4527 status += args.pgbase;
4528 }
4529
4530 nfs_invalidate_atime(dir);
4531
4532 dprintk("%s: returns %d\n", __func__, status);
4533 return status;
4534 }
4535
4536 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4537 u64 cookie, struct page **pages, unsigned int count, bool plus)
4538 {
4539 struct nfs4_exception exception = { };
4540 int err;
4541 do {
4542 err = _nfs4_proc_readdir(dentry, cred, cookie,
4543 pages, count, plus);
4544 trace_nfs4_readdir(d_inode(dentry), err);
4545 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4546 &exception);
4547 } while (exception.retry);
4548 return err;
4549 }
4550
4551 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4552 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4553 {
4554 struct nfs4_createdata *data;
4555 int mode = sattr->ia_mode;
4556 int status = -ENOMEM;
4557
4558 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4559 if (data == NULL)
4560 goto out;
4561
4562 if (S_ISFIFO(mode))
4563 data->arg.ftype = NF4FIFO;
4564 else if (S_ISBLK(mode)) {
4565 data->arg.ftype = NF4BLK;
4566 data->arg.u.device.specdata1 = MAJOR(rdev);
4567 data->arg.u.device.specdata2 = MINOR(rdev);
4568 }
4569 else if (S_ISCHR(mode)) {
4570 data->arg.ftype = NF4CHR;
4571 data->arg.u.device.specdata1 = MAJOR(rdev);
4572 data->arg.u.device.specdata2 = MINOR(rdev);
4573 } else if (!S_ISSOCK(mode)) {
4574 status = -EINVAL;
4575 goto out_free;
4576 }
4577
4578 data->arg.label = label;
4579 status = nfs4_do_create(dir, dentry, data);
4580 out_free:
4581 nfs4_free_createdata(data);
4582 out:
4583 return status;
4584 }
4585
4586 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4587 struct iattr *sattr, dev_t rdev)
4588 {
4589 struct nfs_server *server = NFS_SERVER(dir);
4590 struct nfs4_exception exception = { };
4591 struct nfs4_label l, *label = NULL;
4592 int err;
4593
4594 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4595
4596 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4597 sattr->ia_mode &= ~current_umask();
4598 do {
4599 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4600 trace_nfs4_mknod(dir, &dentry->d_name, err);
4601 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4602 &exception);
4603 } while (exception.retry);
4604
4605 nfs4_label_release_security(label);
4606
4607 return err;
4608 }
4609
4610 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4611 struct nfs_fsstat *fsstat)
4612 {
4613 struct nfs4_statfs_arg args = {
4614 .fh = fhandle,
4615 .bitmask = server->attr_bitmask,
4616 };
4617 struct nfs4_statfs_res res = {
4618 .fsstat = fsstat,
4619 };
4620 struct rpc_message msg = {
4621 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4622 .rpc_argp = &args,
4623 .rpc_resp = &res,
4624 };
4625
4626 nfs_fattr_init(fsstat->fattr);
4627 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4628 }
4629
4630 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4631 {
4632 struct nfs4_exception exception = { };
4633 int err;
4634 do {
4635 err = nfs4_handle_exception(server,
4636 _nfs4_proc_statfs(server, fhandle, fsstat),
4637 &exception);
4638 } while (exception.retry);
4639 return err;
4640 }
4641
4642 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4643 struct nfs_fsinfo *fsinfo)
4644 {
4645 struct nfs4_fsinfo_arg args = {
4646 .fh = fhandle,
4647 .bitmask = server->attr_bitmask,
4648 };
4649 struct nfs4_fsinfo_res res = {
4650 .fsinfo = fsinfo,
4651 };
4652 struct rpc_message msg = {
4653 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4654 .rpc_argp = &args,
4655 .rpc_resp = &res,
4656 };
4657
4658 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4659 }
4660
4661 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4662 {
4663 struct nfs4_exception exception = { };
4664 unsigned long now = jiffies;
4665 int err;
4666
4667 do {
4668 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4669 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4670 if (err == 0) {
4671 nfs4_set_lease_period(server->nfs_client,
4672 fsinfo->lease_time * HZ,
4673 now);
4674 break;
4675 }
4676 err = nfs4_handle_exception(server, err, &exception);
4677 } while (exception.retry);
4678 return err;
4679 }
4680
4681 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4682 {
4683 int error;
4684
4685 nfs_fattr_init(fsinfo->fattr);
4686 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4687 if (error == 0) {
4688 /* block layout checks this! */
4689 server->pnfs_blksize = fsinfo->blksize;
4690 set_pnfs_layoutdriver(server, fhandle, fsinfo);
4691 }
4692
4693 return error;
4694 }
4695
4696 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4697 struct nfs_pathconf *pathconf)
4698 {
4699 struct nfs4_pathconf_arg args = {
4700 .fh = fhandle,
4701 .bitmask = server->attr_bitmask,
4702 };
4703 struct nfs4_pathconf_res res = {
4704 .pathconf = pathconf,
4705 };
4706 struct rpc_message msg = {
4707 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4708 .rpc_argp = &args,
4709 .rpc_resp = &res,
4710 };
4711
4712 /* None of the pathconf attributes are mandatory to implement */
4713 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4714 memset(pathconf, 0, sizeof(*pathconf));
4715 return 0;
4716 }
4717
4718 nfs_fattr_init(pathconf->fattr);
4719 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4720 }
4721
4722 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4723 struct nfs_pathconf *pathconf)
4724 {
4725 struct nfs4_exception exception = { };
4726 int err;
4727
4728 do {
4729 err = nfs4_handle_exception(server,
4730 _nfs4_proc_pathconf(server, fhandle, pathconf),
4731 &exception);
4732 } while (exception.retry);
4733 return err;
4734 }
4735
4736 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4737 const struct nfs_open_context *ctx,
4738 const struct nfs_lock_context *l_ctx,
4739 fmode_t fmode)
4740 {
4741 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
4742 }
4743 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4744
4745 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4746 const struct nfs_open_context *ctx,
4747 const struct nfs_lock_context *l_ctx,
4748 fmode_t fmode)
4749 {
4750 nfs4_stateid current_stateid;
4751
4752 /* If the current stateid represents a lost lock, then exit */
4753 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4754 return true;
4755 return nfs4_stateid_match(stateid, &current_stateid);
4756 }
4757
4758 static bool nfs4_error_stateid_expired(int err)
4759 {
4760 switch (err) {
4761 case -NFS4ERR_DELEG_REVOKED:
4762 case -NFS4ERR_ADMIN_REVOKED:
4763 case -NFS4ERR_BAD_STATEID:
4764 case -NFS4ERR_STALE_STATEID:
4765 case -NFS4ERR_OLD_STATEID:
4766 case -NFS4ERR_OPENMODE:
4767 case -NFS4ERR_EXPIRED:
4768 return true;
4769 }
4770 return false;
4771 }
4772
4773 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4774 {
4775 struct nfs_server *server = NFS_SERVER(hdr->inode);
4776
4777 trace_nfs4_read(hdr, task->tk_status);
4778 if (task->tk_status < 0) {
4779 struct nfs4_exception exception = {
4780 .inode = hdr->inode,
4781 .state = hdr->args.context->state,
4782 .stateid = &hdr->args.stateid,
4783 };
4784 task->tk_status = nfs4_async_handle_exception(task,
4785 server, task->tk_status, &exception);
4786 if (exception.retry) {
4787 rpc_restart_call_prepare(task);
4788 return -EAGAIN;
4789 }
4790 }
4791
4792 if (task->tk_status > 0)
4793 renew_lease(server, hdr->timestamp);
4794 return 0;
4795 }
4796
4797 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4798 struct nfs_pgio_args *args)
4799 {
4800
4801 if (!nfs4_error_stateid_expired(task->tk_status) ||
4802 nfs4_stateid_is_current(&args->stateid,
4803 args->context,
4804 args->lock_context,
4805 FMODE_READ))
4806 return false;
4807 rpc_restart_call_prepare(task);
4808 return true;
4809 }
4810
4811 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4812 {
4813
4814 dprintk("--> %s\n", __func__);
4815
4816 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4817 return -EAGAIN;
4818 if (nfs4_read_stateid_changed(task, &hdr->args))
4819 return -EAGAIN;
4820 if (task->tk_status > 0)
4821 nfs_invalidate_atime(hdr->inode);
4822 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4823 nfs4_read_done_cb(task, hdr);
4824 }
4825
4826 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4827 struct rpc_message *msg)
4828 {
4829 hdr->timestamp = jiffies;
4830 if (!hdr->pgio_done_cb)
4831 hdr->pgio_done_cb = nfs4_read_done_cb;
4832 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4833 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4834 }
4835
4836 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4837 struct nfs_pgio_header *hdr)
4838 {
4839 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
4840 &hdr->args.seq_args,
4841 &hdr->res.seq_res,
4842 task))
4843 return 0;
4844 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4845 hdr->args.lock_context,
4846 hdr->rw_mode) == -EIO)
4847 return -EIO;
4848 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4849 return -EIO;
4850 return 0;
4851 }
4852
4853 static int nfs4_write_done_cb(struct rpc_task *task,
4854 struct nfs_pgio_header *hdr)
4855 {
4856 struct inode *inode = hdr->inode;
4857
4858 trace_nfs4_write(hdr, task->tk_status);
4859 if (task->tk_status < 0) {
4860 struct nfs4_exception exception = {
4861 .inode = hdr->inode,
4862 .state = hdr->args.context->state,
4863 .stateid = &hdr->args.stateid,
4864 };
4865 task->tk_status = nfs4_async_handle_exception(task,
4866 NFS_SERVER(inode), task->tk_status,
4867 &exception);
4868 if (exception.retry) {
4869 rpc_restart_call_prepare(task);
4870 return -EAGAIN;
4871 }
4872 }
4873 if (task->tk_status >= 0) {
4874 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4875 nfs_writeback_update_inode(hdr);
4876 }
4877 return 0;
4878 }
4879
4880 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4881 struct nfs_pgio_args *args)
4882 {
4883
4884 if (!nfs4_error_stateid_expired(task->tk_status) ||
4885 nfs4_stateid_is_current(&args->stateid,
4886 args->context,
4887 args->lock_context,
4888 FMODE_WRITE))
4889 return false;
4890 rpc_restart_call_prepare(task);
4891 return true;
4892 }
4893
4894 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4895 {
4896 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4897 return -EAGAIN;
4898 if (nfs4_write_stateid_changed(task, &hdr->args))
4899 return -EAGAIN;
4900 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4901 nfs4_write_done_cb(task, hdr);
4902 }
4903
4904 static
4905 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4906 {
4907 /* Don't request attributes for pNFS or O_DIRECT writes */
4908 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4909 return false;
4910 /* Otherwise, request attributes if and only if we don't hold
4911 * a delegation
4912 */
4913 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4914 }
4915
4916 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4917 struct rpc_message *msg)
4918 {
4919 struct nfs_server *server = NFS_SERVER(hdr->inode);
4920
4921 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4922 hdr->args.bitmask = NULL;
4923 hdr->res.fattr = NULL;
4924 } else
4925 hdr->args.bitmask = server->cache_consistency_bitmask;
4926
4927 if (!hdr->pgio_done_cb)
4928 hdr->pgio_done_cb = nfs4_write_done_cb;
4929 hdr->res.server = server;
4930 hdr->timestamp = jiffies;
4931
4932 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4933 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4934 }
4935
4936 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4937 {
4938 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
4939 &data->args.seq_args,
4940 &data->res.seq_res,
4941 task);
4942 }
4943
4944 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4945 {
4946 struct inode *inode = data->inode;
4947
4948 trace_nfs4_commit(data, task->tk_status);
4949 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4950 NULL, NULL) == -EAGAIN) {
4951 rpc_restart_call_prepare(task);
4952 return -EAGAIN;
4953 }
4954 return 0;
4955 }
4956
4957 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4958 {
4959 if (!nfs4_sequence_done(task, &data->res.seq_res))
4960 return -EAGAIN;
4961 return data->commit_done_cb(task, data);
4962 }
4963
4964 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4965 {
4966 struct nfs_server *server = NFS_SERVER(data->inode);
4967
4968 if (data->commit_done_cb == NULL)
4969 data->commit_done_cb = nfs4_commit_done_cb;
4970 data->res.server = server;
4971 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4972 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4973 }
4974
4975 struct nfs4_renewdata {
4976 struct nfs_client *client;
4977 unsigned long timestamp;
4978 };
4979
4980 /*
4981 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4982 * standalone procedure for queueing an asynchronous RENEW.
4983 */
4984 static void nfs4_renew_release(void *calldata)
4985 {
4986 struct nfs4_renewdata *data = calldata;
4987 struct nfs_client *clp = data->client;
4988
4989 if (refcount_read(&clp->cl_count) > 1)
4990 nfs4_schedule_state_renewal(clp);
4991 nfs_put_client(clp);
4992 kfree(data);
4993 }
4994
4995 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4996 {
4997 struct nfs4_renewdata *data = calldata;
4998 struct nfs_client *clp = data->client;
4999 unsigned long timestamp = data->timestamp;
5000
5001 trace_nfs4_renew_async(clp, task->tk_status);
5002 switch (task->tk_status) {
5003 case 0:
5004 break;
5005 case -NFS4ERR_LEASE_MOVED:
5006 nfs4_schedule_lease_moved_recovery(clp);
5007 break;
5008 default:
5009 /* Unless we're shutting down, schedule state recovery! */
5010 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5011 return;
5012 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5013 nfs4_schedule_lease_recovery(clp);
5014 return;
5015 }
5016 nfs4_schedule_path_down_recovery(clp);
5017 }
5018 do_renew_lease(clp, timestamp);
5019 }
5020
5021 static const struct rpc_call_ops nfs4_renew_ops = {
5022 .rpc_call_done = nfs4_renew_done,
5023 .rpc_release = nfs4_renew_release,
5024 };
5025
5026 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5027 {
5028 struct rpc_message msg = {
5029 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5030 .rpc_argp = clp,
5031 .rpc_cred = cred,
5032 };
5033 struct nfs4_renewdata *data;
5034
5035 if (renew_flags == 0)
5036 return 0;
5037 if (!refcount_inc_not_zero(&clp->cl_count))
5038 return -EIO;
5039 data = kmalloc(sizeof(*data), GFP_NOFS);
5040 if (data == NULL) {
5041 nfs_put_client(clp);
5042 return -ENOMEM;
5043 }
5044 data->client = clp;
5045 data->timestamp = jiffies;
5046 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5047 &nfs4_renew_ops, data);
5048 }
5049
5050 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
5051 {
5052 struct rpc_message msg = {
5053 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5054 .rpc_argp = clp,
5055 .rpc_cred = cred,
5056 };
5057 unsigned long now = jiffies;
5058 int status;
5059
5060 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5061 if (status < 0)
5062 return status;
5063 do_renew_lease(clp, now);
5064 return 0;
5065 }
5066
5067 static inline int nfs4_server_supports_acls(struct nfs_server *server)
5068 {
5069 return server->caps & NFS_CAP_ACLS;
5070 }
5071
5072 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5073 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5074 * the stack.
5075 */
5076 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
5077
5078 static int buf_to_pages_noslab(const void *buf, size_t buflen,
5079 struct page **pages)
5080 {
5081 struct page *newpage, **spages;
5082 int rc = 0;
5083 size_t len;
5084 spages = pages;
5085
5086 do {
5087 len = min_t(size_t, PAGE_SIZE, buflen);
5088 newpage = alloc_page(GFP_KERNEL);
5089
5090 if (newpage == NULL)
5091 goto unwind;
5092 memcpy(page_address(newpage), buf, len);
5093 buf += len;
5094 buflen -= len;
5095 *pages++ = newpage;
5096 rc++;
5097 } while (buflen != 0);
5098
5099 return rc;
5100
5101 unwind:
5102 for(; rc > 0; rc--)
5103 __free_page(spages[rc-1]);
5104 return -ENOMEM;
5105 }
5106
5107 struct nfs4_cached_acl {
5108 int cached;
5109 size_t len;
5110 char data[0];
5111 };
5112
5113 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5114 {
5115 struct nfs_inode *nfsi = NFS_I(inode);
5116
5117 spin_lock(&inode->i_lock);
5118 kfree(nfsi->nfs4_acl);
5119 nfsi->nfs4_acl = acl;
5120 spin_unlock(&inode->i_lock);
5121 }
5122
5123 static void nfs4_zap_acl_attr(struct inode *inode)
5124 {
5125 nfs4_set_cached_acl(inode, NULL);
5126 }
5127
5128 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5129 {
5130 struct nfs_inode *nfsi = NFS_I(inode);
5131 struct nfs4_cached_acl *acl;
5132 int ret = -ENOENT;
5133
5134 spin_lock(&inode->i_lock);
5135 acl = nfsi->nfs4_acl;
5136 if (acl == NULL)
5137 goto out;
5138 if (buf == NULL) /* user is just asking for length */
5139 goto out_len;
5140 if (acl->cached == 0)
5141 goto out;
5142 ret = -ERANGE; /* see getxattr(2) man page */
5143 if (acl->len > buflen)
5144 goto out;
5145 memcpy(buf, acl->data, acl->len);
5146 out_len:
5147 ret = acl->len;
5148 out:
5149 spin_unlock(&inode->i_lock);
5150 return ret;
5151 }
5152
5153 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5154 {
5155 struct nfs4_cached_acl *acl;
5156 size_t buflen = sizeof(*acl) + acl_len;
5157
5158 if (buflen <= PAGE_SIZE) {
5159 acl = kmalloc(buflen, GFP_KERNEL);
5160 if (acl == NULL)
5161 goto out;
5162 acl->cached = 1;
5163 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5164 } else {
5165 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5166 if (acl == NULL)
5167 goto out;
5168 acl->cached = 0;
5169 }
5170 acl->len = acl_len;
5171 out:
5172 nfs4_set_cached_acl(inode, acl);
5173 }
5174
5175 /*
5176 * The getxattr API returns the required buffer length when called with a
5177 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5178 * the required buf. On a NULL buf, we send a page of data to the server
5179 * guessing that the ACL request can be serviced by a page. If so, we cache
5180 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5181 * the cache. If not so, we throw away the page, and cache the required
5182 * length. The next getxattr call will then produce another round trip to
5183 * the server, this time with the input buf of the required size.
5184 */
5185 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5186 {
5187 struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
5188 struct nfs_getaclargs args = {
5189 .fh = NFS_FH(inode),
5190 .acl_pages = pages,
5191 .acl_len = buflen,
5192 };
5193 struct nfs_getaclres res = {
5194 .acl_len = buflen,
5195 };
5196 struct rpc_message msg = {
5197 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5198 .rpc_argp = &args,
5199 .rpc_resp = &res,
5200 };
5201 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5202 int ret = -ENOMEM, i;
5203
5204 if (npages > ARRAY_SIZE(pages))
5205 return -ERANGE;
5206
5207 for (i = 0; i < npages; i++) {
5208 pages[i] = alloc_page(GFP_KERNEL);
5209 if (!pages[i])
5210 goto out_free;
5211 }
5212
5213 /* for decoding across pages */
5214 res.acl_scratch = alloc_page(GFP_KERNEL);
5215 if (!res.acl_scratch)
5216 goto out_free;
5217
5218 args.acl_len = npages * PAGE_SIZE;
5219
5220 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5221 __func__, buf, buflen, npages, args.acl_len);
5222 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5223 &msg, &args.seq_args, &res.seq_res, 0);
5224 if (ret)
5225 goto out_free;
5226
5227 /* Handle the case where the passed-in buffer is too short */
5228 if (res.acl_flags & NFS4_ACL_TRUNC) {
5229 /* Did the user only issue a request for the acl length? */
5230 if (buf == NULL)
5231 goto out_ok;
5232 ret = -ERANGE;
5233 goto out_free;
5234 }
5235 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5236 if (buf) {
5237 if (res.acl_len > buflen) {
5238 ret = -ERANGE;
5239 goto out_free;
5240 }
5241 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5242 }
5243 out_ok:
5244 ret = res.acl_len;
5245 out_free:
5246 for (i = 0; i < npages; i++)
5247 if (pages[i])
5248 __free_page(pages[i]);
5249 if (res.acl_scratch)
5250 __free_page(res.acl_scratch);
5251 return ret;
5252 }
5253
5254 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5255 {
5256 struct nfs4_exception exception = { };
5257 ssize_t ret;
5258 do {
5259 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5260 trace_nfs4_get_acl(inode, ret);
5261 if (ret >= 0)
5262 break;
5263 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5264 } while (exception.retry);
5265 return ret;
5266 }
5267
5268 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5269 {
5270 struct nfs_server *server = NFS_SERVER(inode);
5271 int ret;
5272
5273 if (!nfs4_server_supports_acls(server))
5274 return -EOPNOTSUPP;
5275 ret = nfs_revalidate_inode(server, inode);
5276 if (ret < 0)
5277 return ret;
5278 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
5279 nfs_zap_acl_cache(inode);
5280 ret = nfs4_read_cached_acl(inode, buf, buflen);
5281 if (ret != -ENOENT)
5282 /* -ENOENT is returned if there is no ACL or if there is an ACL
5283 * but no cached acl data, just the acl length */
5284 return ret;
5285 return nfs4_get_acl_uncached(inode, buf, buflen);
5286 }
5287
5288 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5289 {
5290 struct nfs_server *server = NFS_SERVER(inode);
5291 struct page *pages[NFS4ACL_MAXPAGES];
5292 struct nfs_setaclargs arg = {
5293 .fh = NFS_FH(inode),
5294 .acl_pages = pages,
5295 .acl_len = buflen,
5296 };
5297 struct nfs_setaclres res;
5298 struct rpc_message msg = {
5299 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
5300 .rpc_argp = &arg,
5301 .rpc_resp = &res,
5302 };
5303 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5304 int ret, i;
5305
5306 if (!nfs4_server_supports_acls(server))
5307 return -EOPNOTSUPP;
5308 if (npages > ARRAY_SIZE(pages))
5309 return -ERANGE;
5310 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
5311 if (i < 0)
5312 return i;
5313 nfs4_inode_return_delegation(inode);
5314 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5315
5316 /*
5317 * Free each page after tx, so the only ref left is
5318 * held by the network stack
5319 */
5320 for (; i > 0; i--)
5321 put_page(pages[i-1]);
5322
5323 /*
5324 * Acl update can result in inode attribute update.
5325 * so mark the attribute cache invalid.
5326 */
5327 spin_lock(&inode->i_lock);
5328 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
5329 spin_unlock(&inode->i_lock);
5330 nfs_access_zap_cache(inode);
5331 nfs_zap_acl_cache(inode);
5332 return ret;
5333 }
5334
5335 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5336 {
5337 struct nfs4_exception exception = { };
5338 int err;
5339 do {
5340 err = __nfs4_proc_set_acl(inode, buf, buflen);
5341 trace_nfs4_set_acl(inode, err);
5342 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5343 &exception);
5344 } while (exception.retry);
5345 return err;
5346 }
5347
5348 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
5349 static int _nfs4_get_security_label(struct inode *inode, void *buf,
5350 size_t buflen)
5351 {
5352 struct nfs_server *server = NFS_SERVER(inode);
5353 struct nfs_fattr fattr;
5354 struct nfs4_label label = {0, 0, buflen, buf};
5355
5356 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5357 struct nfs4_getattr_arg arg = {
5358 .fh = NFS_FH(inode),
5359 .bitmask = bitmask,
5360 };
5361 struct nfs4_getattr_res res = {
5362 .fattr = &fattr,
5363 .label = &label,
5364 .server = server,
5365 };
5366 struct rpc_message msg = {
5367 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
5368 .rpc_argp = &arg,
5369 .rpc_resp = &res,
5370 };
5371 int ret;
5372
5373 nfs_fattr_init(&fattr);
5374
5375 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
5376 if (ret)
5377 return ret;
5378 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
5379 return -ENOENT;
5380 if (buflen < label.len)
5381 return -ERANGE;
5382 return 0;
5383 }
5384
5385 static int nfs4_get_security_label(struct inode *inode, void *buf,
5386 size_t buflen)
5387 {
5388 struct nfs4_exception exception = { };
5389 int err;
5390
5391 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5392 return -EOPNOTSUPP;
5393
5394 do {
5395 err = _nfs4_get_security_label(inode, buf, buflen);
5396 trace_nfs4_get_security_label(inode, err);
5397 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5398 &exception);
5399 } while (exception.retry);
5400 return err;
5401 }
5402
5403 static int _nfs4_do_set_security_label(struct inode *inode,
5404 struct nfs4_label *ilabel,
5405 struct nfs_fattr *fattr,
5406 struct nfs4_label *olabel)
5407 {
5408
5409 struct iattr sattr = {0};
5410 struct nfs_server *server = NFS_SERVER(inode);
5411 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5412 struct nfs_setattrargs arg = {
5413 .fh = NFS_FH(inode),
5414 .iap = &sattr,
5415 .server = server,
5416 .bitmask = bitmask,
5417 .label = ilabel,
5418 };
5419 struct nfs_setattrres res = {
5420 .fattr = fattr,
5421 .label = olabel,
5422 .server = server,
5423 };
5424 struct rpc_message msg = {
5425 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
5426 .rpc_argp = &arg,
5427 .rpc_resp = &res,
5428 };
5429 int status;
5430
5431 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
5432
5433 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5434 if (status)
5435 dprintk("%s failed: %d\n", __func__, status);
5436
5437 return status;
5438 }
5439
5440 static int nfs4_do_set_security_label(struct inode *inode,
5441 struct nfs4_label *ilabel,
5442 struct nfs_fattr *fattr,
5443 struct nfs4_label *olabel)
5444 {
5445 struct nfs4_exception exception = { };
5446 int err;
5447
5448 do {
5449 err = _nfs4_do_set_security_label(inode, ilabel,
5450 fattr, olabel);
5451 trace_nfs4_set_security_label(inode, err);
5452 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5453 &exception);
5454 } while (exception.retry);
5455 return err;
5456 }
5457
5458 static int
5459 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
5460 {
5461 struct nfs4_label ilabel, *olabel = NULL;
5462 struct nfs_fattr fattr;
5463 struct rpc_cred *cred;
5464 int status;
5465
5466 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5467 return -EOPNOTSUPP;
5468
5469 nfs_fattr_init(&fattr);
5470
5471 ilabel.pi = 0;
5472 ilabel.lfs = 0;
5473 ilabel.label = (char *)buf;
5474 ilabel.len = buflen;
5475
5476 cred = rpc_lookup_cred();
5477 if (IS_ERR(cred))
5478 return PTR_ERR(cred);
5479
5480 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5481 if (IS_ERR(olabel)) {
5482 status = -PTR_ERR(olabel);
5483 goto out;
5484 }
5485
5486 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5487 if (status == 0)
5488 nfs_setsecurity(inode, &fattr, olabel);
5489
5490 nfs4_label_free(olabel);
5491 out:
5492 put_rpccred(cred);
5493 return status;
5494 }
5495 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5496
5497
5498 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5499 nfs4_verifier *bootverf)
5500 {
5501 __be32 verf[2];
5502
5503 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5504 /* An impossible timestamp guarantees this value
5505 * will never match a generated boot time. */
5506 verf[0] = cpu_to_be32(U32_MAX);
5507 verf[1] = cpu_to_be32(U32_MAX);
5508 } else {
5509 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5510 u64 ns = ktime_to_ns(nn->boot_time);
5511
5512 verf[0] = cpu_to_be32(ns >> 32);
5513 verf[1] = cpu_to_be32(ns);
5514 }
5515 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5516 }
5517
5518 static int
5519 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5520 {
5521 size_t len;
5522 char *str;
5523
5524 if (clp->cl_owner_id != NULL)
5525 return 0;
5526
5527 rcu_read_lock();
5528 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5529 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5530 1 +
5531 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5532 1;
5533 rcu_read_unlock();
5534
5535 if (len > NFS4_OPAQUE_LIMIT + 1)
5536 return -EINVAL;
5537
5538 /*
5539 * Since this string is allocated at mount time, and held until the
5540 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5541 * about a memory-reclaim deadlock.
5542 */
5543 str = kmalloc(len, GFP_KERNEL);
5544 if (!str)
5545 return -ENOMEM;
5546
5547 rcu_read_lock();
5548 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5549 clp->cl_ipaddr,
5550 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5551 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5552 rcu_read_unlock();
5553
5554 clp->cl_owner_id = str;
5555 return 0;
5556 }
5557
5558 static int
5559 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5560 {
5561 size_t len;
5562 char *str;
5563
5564 len = 10 + 10 + 1 + 10 + 1 +
5565 strlen(nfs4_client_id_uniquifier) + 1 +
5566 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5567
5568 if (len > NFS4_OPAQUE_LIMIT + 1)
5569 return -EINVAL;
5570
5571 /*
5572 * Since this string is allocated at mount time, and held until the
5573 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5574 * about a memory-reclaim deadlock.
5575 */
5576 str = kmalloc(len, GFP_KERNEL);
5577 if (!str)
5578 return -ENOMEM;
5579
5580 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5581 clp->rpc_ops->version, clp->cl_minorversion,
5582 nfs4_client_id_uniquifier,
5583 clp->cl_rpcclient->cl_nodename);
5584 clp->cl_owner_id = str;
5585 return 0;
5586 }
5587
5588 static int
5589 nfs4_init_uniform_client_string(struct nfs_client *clp)
5590 {
5591 size_t len;
5592 char *str;
5593
5594 if (clp->cl_owner_id != NULL)
5595 return 0;
5596
5597 if (nfs4_client_id_uniquifier[0] != '\0')
5598 return nfs4_init_uniquifier_client_string(clp);
5599
5600 len = 10 + 10 + 1 + 10 + 1 +
5601 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5602
5603 if (len > NFS4_OPAQUE_LIMIT + 1)
5604 return -EINVAL;
5605
5606 /*
5607 * Since this string is allocated at mount time, and held until the
5608 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5609 * about a memory-reclaim deadlock.
5610 */
5611 str = kmalloc(len, GFP_KERNEL);
5612 if (!str)
5613 return -ENOMEM;
5614
5615 scnprintf(str, len, "Linux NFSv%u.%u %s",
5616 clp->rpc_ops->version, clp->cl_minorversion,
5617 clp->cl_rpcclient->cl_nodename);
5618 clp->cl_owner_id = str;
5619 return 0;
5620 }
5621
5622 /*
5623 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5624 * services. Advertise one based on the address family of the
5625 * clientaddr.
5626 */
5627 static unsigned int
5628 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5629 {
5630 if (strchr(clp->cl_ipaddr, ':') != NULL)
5631 return scnprintf(buf, len, "tcp6");
5632 else
5633 return scnprintf(buf, len, "tcp");
5634 }
5635
5636 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5637 {
5638 struct nfs4_setclientid *sc = calldata;
5639
5640 if (task->tk_status == 0)
5641 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5642 }
5643
5644 static const struct rpc_call_ops nfs4_setclientid_ops = {
5645 .rpc_call_done = nfs4_setclientid_done,
5646 };
5647
5648 /**
5649 * nfs4_proc_setclientid - Negotiate client ID
5650 * @clp: state data structure
5651 * @program: RPC program for NFSv4 callback service
5652 * @port: IP port number for NFS4 callback service
5653 * @cred: RPC credential to use for this call
5654 * @res: where to place the result
5655 *
5656 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5657 */
5658 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5659 unsigned short port, struct rpc_cred *cred,
5660 struct nfs4_setclientid_res *res)
5661 {
5662 nfs4_verifier sc_verifier;
5663 struct nfs4_setclientid setclientid = {
5664 .sc_verifier = &sc_verifier,
5665 .sc_prog = program,
5666 .sc_clnt = clp,
5667 };
5668 struct rpc_message msg = {
5669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5670 .rpc_argp = &setclientid,
5671 .rpc_resp = res,
5672 .rpc_cred = cred,
5673 };
5674 struct rpc_task *task;
5675 struct rpc_task_setup task_setup_data = {
5676 .rpc_client = clp->cl_rpcclient,
5677 .rpc_message = &msg,
5678 .callback_ops = &nfs4_setclientid_ops,
5679 .callback_data = &setclientid,
5680 .flags = RPC_TASK_TIMEOUT,
5681 };
5682 int status;
5683
5684 /* nfs_client_id4 */
5685 nfs4_init_boot_verifier(clp, &sc_verifier);
5686
5687 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5688 status = nfs4_init_uniform_client_string(clp);
5689 else
5690 status = nfs4_init_nonuniform_client_string(clp);
5691
5692 if (status)
5693 goto out;
5694
5695 /* cb_client4 */
5696 setclientid.sc_netid_len =
5697 nfs4_init_callback_netid(clp,
5698 setclientid.sc_netid,
5699 sizeof(setclientid.sc_netid));
5700 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5701 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5702 clp->cl_ipaddr, port >> 8, port & 255);
5703
5704 dprintk("NFS call setclientid auth=%s, '%s'\n",
5705 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5706 clp->cl_owner_id);
5707 task = rpc_run_task(&task_setup_data);
5708 if (IS_ERR(task)) {
5709 status = PTR_ERR(task);
5710 goto out;
5711 }
5712 status = task->tk_status;
5713 if (setclientid.sc_cred) {
5714 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5715 put_rpccred(setclientid.sc_cred);
5716 }
5717 rpc_put_task(task);
5718 out:
5719 trace_nfs4_setclientid(clp, status);
5720 dprintk("NFS reply setclientid: %d\n", status);
5721 return status;
5722 }
5723
5724 /**
5725 * nfs4_proc_setclientid_confirm - Confirm client ID
5726 * @clp: state data structure
5727 * @res: result of a previous SETCLIENTID
5728 * @cred: RPC credential to use for this call
5729 *
5730 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5731 */
5732 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5733 struct nfs4_setclientid_res *arg,
5734 struct rpc_cred *cred)
5735 {
5736 struct rpc_message msg = {
5737 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5738 .rpc_argp = arg,
5739 .rpc_cred = cred,
5740 };
5741 int status;
5742
5743 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5744 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5745 clp->cl_clientid);
5746 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5747 trace_nfs4_setclientid_confirm(clp, status);
5748 dprintk("NFS reply setclientid_confirm: %d\n", status);
5749 return status;
5750 }
5751
5752 struct nfs4_delegreturndata {
5753 struct nfs4_delegreturnargs args;
5754 struct nfs4_delegreturnres res;
5755 struct nfs_fh fh;
5756 nfs4_stateid stateid;
5757 unsigned long timestamp;
5758 struct {
5759 struct nfs4_layoutreturn_args arg;
5760 struct nfs4_layoutreturn_res res;
5761 struct nfs4_xdr_opaque_data ld_private;
5762 u32 roc_barrier;
5763 bool roc;
5764 } lr;
5765 struct nfs_fattr fattr;
5766 int rpc_status;
5767 struct inode *inode;
5768 };
5769
5770 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5771 {
5772 struct nfs4_delegreturndata *data = calldata;
5773 struct nfs4_exception exception = {
5774 .inode = data->inode,
5775 .stateid = &data->stateid,
5776 };
5777
5778 if (!nfs4_sequence_done(task, &data->res.seq_res))
5779 return;
5780
5781 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5782
5783 /* Handle Layoutreturn errors */
5784 if (data->args.lr_args && task->tk_status != 0) {
5785 switch(data->res.lr_ret) {
5786 default:
5787 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
5788 break;
5789 case 0:
5790 data->args.lr_args = NULL;
5791 data->res.lr_res = NULL;
5792 break;
5793 case -NFS4ERR_OLD_STATEID:
5794 if (nfs4_refresh_layout_stateid(&data->args.lr_args->stateid,
5795 data->inode))
5796 goto lr_restart;
5797 /* Fallthrough */
5798 case -NFS4ERR_ADMIN_REVOKED:
5799 case -NFS4ERR_DELEG_REVOKED:
5800 case -NFS4ERR_EXPIRED:
5801 case -NFS4ERR_BAD_STATEID:
5802 case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
5803 case -NFS4ERR_WRONG_CRED:
5804 data->args.lr_args = NULL;
5805 data->res.lr_res = NULL;
5806 goto lr_restart;
5807 }
5808 }
5809
5810 switch (task->tk_status) {
5811 case 0:
5812 renew_lease(data->res.server, data->timestamp);
5813 break;
5814 case -NFS4ERR_ADMIN_REVOKED:
5815 case -NFS4ERR_DELEG_REVOKED:
5816 case -NFS4ERR_EXPIRED:
5817 nfs4_free_revoked_stateid(data->res.server,
5818 data->args.stateid,
5819 task->tk_msg.rpc_cred);
5820 /* Fallthrough */
5821 case -NFS4ERR_BAD_STATEID:
5822 case -NFS4ERR_STALE_STATEID:
5823 task->tk_status = 0;
5824 break;
5825 case -NFS4ERR_OLD_STATEID:
5826 if (nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
5827 goto out_restart;
5828 task->tk_status = 0;
5829 break;
5830 case -NFS4ERR_ACCESS:
5831 if (data->args.bitmask) {
5832 data->args.bitmask = NULL;
5833 data->res.fattr = NULL;
5834 goto out_restart;
5835 }
5836 /* Fallthrough */
5837 default:
5838 task->tk_status = nfs4_async_handle_exception(task,
5839 data->res.server, task->tk_status,
5840 &exception);
5841 if (exception.retry)
5842 goto out_restart;
5843 }
5844 data->rpc_status = task->tk_status;
5845 return;
5846 lr_restart:
5847 data->res.lr_ret = 0;
5848 out_restart:
5849 task->tk_status = 0;
5850 rpc_restart_call_prepare(task);
5851 }
5852
5853 static void nfs4_delegreturn_release(void *calldata)
5854 {
5855 struct nfs4_delegreturndata *data = calldata;
5856 struct inode *inode = data->inode;
5857
5858 if (inode) {
5859 if (data->lr.roc)
5860 pnfs_roc_release(&data->lr.arg, &data->lr.res,
5861 data->res.lr_ret);
5862 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5863 nfs_iput_and_deactive(inode);
5864 }
5865 kfree(calldata);
5866 }
5867
5868 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5869 {
5870 struct nfs4_delegreturndata *d_data;
5871
5872 d_data = (struct nfs4_delegreturndata *)data;
5873
5874 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
5875 return;
5876
5877 nfs4_setup_sequence(d_data->res.server->nfs_client,
5878 &d_data->args.seq_args,
5879 &d_data->res.seq_res,
5880 task);
5881 }
5882
5883 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5884 .rpc_call_prepare = nfs4_delegreturn_prepare,
5885 .rpc_call_done = nfs4_delegreturn_done,
5886 .rpc_release = nfs4_delegreturn_release,
5887 };
5888
5889 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5890 {
5891 struct nfs4_delegreturndata *data;
5892 struct nfs_server *server = NFS_SERVER(inode);
5893 struct rpc_task *task;
5894 struct rpc_message msg = {
5895 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5896 .rpc_cred = cred,
5897 };
5898 struct rpc_task_setup task_setup_data = {
5899 .rpc_client = server->client,
5900 .rpc_message = &msg,
5901 .callback_ops = &nfs4_delegreturn_ops,
5902 .flags = RPC_TASK_ASYNC,
5903 };
5904 int status = 0;
5905
5906 data = kzalloc(sizeof(*data), GFP_NOFS);
5907 if (data == NULL)
5908 return -ENOMEM;
5909 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5910
5911 nfs4_state_protect(server->nfs_client,
5912 NFS_SP4_MACH_CRED_CLEANUP,
5913 &task_setup_data.rpc_client, &msg);
5914
5915 data->args.fhandle = &data->fh;
5916 data->args.stateid = &data->stateid;
5917 data->args.bitmask = server->cache_consistency_bitmask;
5918 nfs_copy_fh(&data->fh, NFS_FH(inode));
5919 nfs4_stateid_copy(&data->stateid, stateid);
5920 data->res.fattr = &data->fattr;
5921 data->res.server = server;
5922 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
5923 data->lr.arg.ld_private = &data->lr.ld_private;
5924 nfs_fattr_init(data->res.fattr);
5925 data->timestamp = jiffies;
5926 data->rpc_status = 0;
5927 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
5928 data->inode = nfs_igrab_and_active(inode);
5929 if (data->inode) {
5930 if (data->lr.roc) {
5931 data->args.lr_args = &data->lr.arg;
5932 data->res.lr_res = &data->lr.res;
5933 }
5934 } else if (data->lr.roc) {
5935 pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
5936 data->lr.roc = false;
5937 }
5938
5939 task_setup_data.callback_data = data;
5940 msg.rpc_argp = &data->args;
5941 msg.rpc_resp = &data->res;
5942 task = rpc_run_task(&task_setup_data);
5943 if (IS_ERR(task))
5944 return PTR_ERR(task);
5945 if (!issync)
5946 goto out;
5947 status = rpc_wait_for_completion_task(task);
5948 if (status != 0)
5949 goto out;
5950 status = data->rpc_status;
5951 out:
5952 rpc_put_task(task);
5953 return status;
5954 }
5955
5956 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5957 {
5958 struct nfs_server *server = NFS_SERVER(inode);
5959 struct nfs4_exception exception = { };
5960 int err;
5961 do {
5962 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5963 trace_nfs4_delegreturn(inode, stateid, err);
5964 switch (err) {
5965 case -NFS4ERR_STALE_STATEID:
5966 case -NFS4ERR_EXPIRED:
5967 case 0:
5968 return 0;
5969 }
5970 err = nfs4_handle_exception(server, err, &exception);
5971 } while (exception.retry);
5972 return err;
5973 }
5974
5975 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5976 {
5977 struct inode *inode = state->inode;
5978 struct nfs_server *server = NFS_SERVER(inode);
5979 struct nfs_client *clp = server->nfs_client;
5980 struct nfs_lockt_args arg = {
5981 .fh = NFS_FH(inode),
5982 .fl = request,
5983 };
5984 struct nfs_lockt_res res = {
5985 .denied = request,
5986 };
5987 struct rpc_message msg = {
5988 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5989 .rpc_argp = &arg,
5990 .rpc_resp = &res,
5991 .rpc_cred = state->owner->so_cred,
5992 };
5993 struct nfs4_lock_state *lsp;
5994 int status;
5995
5996 arg.lock_owner.clientid = clp->cl_clientid;
5997 status = nfs4_set_lock_state(state, request);
5998 if (status != 0)
5999 goto out;
6000 lsp = request->fl_u.nfs4_fl.owner;
6001 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6002 arg.lock_owner.s_dev = server->s_dev;
6003 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6004 switch (status) {
6005 case 0:
6006 request->fl_type = F_UNLCK;
6007 break;
6008 case -NFS4ERR_DENIED:
6009 status = 0;
6010 }
6011 request->fl_ops->fl_release_private(request);
6012 request->fl_ops = NULL;
6013 out:
6014 return status;
6015 }
6016
6017 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6018 {
6019 struct nfs4_exception exception = { };
6020 int err;
6021
6022 do {
6023 err = _nfs4_proc_getlk(state, cmd, request);
6024 trace_nfs4_get_lock(request, state, cmd, err);
6025 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6026 &exception);
6027 } while (exception.retry);
6028 return err;
6029 }
6030
6031 struct nfs4_unlockdata {
6032 struct nfs_locku_args arg;
6033 struct nfs_locku_res res;
6034 struct nfs4_lock_state *lsp;
6035 struct nfs_open_context *ctx;
6036 struct nfs_lock_context *l_ctx;
6037 struct file_lock fl;
6038 struct nfs_server *server;
6039 unsigned long timestamp;
6040 };
6041
6042 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6043 struct nfs_open_context *ctx,
6044 struct nfs4_lock_state *lsp,
6045 struct nfs_seqid *seqid)
6046 {
6047 struct nfs4_unlockdata *p;
6048 struct inode *inode = lsp->ls_state->inode;
6049
6050 p = kzalloc(sizeof(*p), GFP_NOFS);
6051 if (p == NULL)
6052 return NULL;
6053 p->arg.fh = NFS_FH(inode);
6054 p->arg.fl = &p->fl;
6055 p->arg.seqid = seqid;
6056 p->res.seqid = seqid;
6057 p->lsp = lsp;
6058 refcount_inc(&lsp->ls_count);
6059 /* Ensure we don't close file until we're done freeing locks! */
6060 p->ctx = get_nfs_open_context(ctx);
6061 p->l_ctx = nfs_get_lock_context(ctx);
6062 memcpy(&p->fl, fl, sizeof(p->fl));
6063 p->server = NFS_SERVER(inode);
6064 return p;
6065 }
6066
6067 static void nfs4_locku_release_calldata(void *data)
6068 {
6069 struct nfs4_unlockdata *calldata = data;
6070 nfs_free_seqid(calldata->arg.seqid);
6071 nfs4_put_lock_state(calldata->lsp);
6072 nfs_put_lock_context(calldata->l_ctx);
6073 put_nfs_open_context(calldata->ctx);
6074 kfree(calldata);
6075 }
6076
6077 static void nfs4_locku_done(struct rpc_task *task, void *data)
6078 {
6079 struct nfs4_unlockdata *calldata = data;
6080 struct nfs4_exception exception = {
6081 .inode = calldata->lsp->ls_state->inode,
6082 .stateid = &calldata->arg.stateid,
6083 };
6084
6085 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6086 return;
6087 switch (task->tk_status) {
6088 case 0:
6089 renew_lease(calldata->server, calldata->timestamp);
6090 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6091 if (nfs4_update_lock_stateid(calldata->lsp,
6092 &calldata->res.stateid))
6093 break;
6094 case -NFS4ERR_ADMIN_REVOKED:
6095 case -NFS4ERR_EXPIRED:
6096 nfs4_free_revoked_stateid(calldata->server,
6097 &calldata->arg.stateid,
6098 task->tk_msg.rpc_cred);
6099 case -NFS4ERR_BAD_STATEID:
6100 case -NFS4ERR_OLD_STATEID:
6101 case -NFS4ERR_STALE_STATEID:
6102 if (!nfs4_stateid_match(&calldata->arg.stateid,
6103 &calldata->lsp->ls_stateid))
6104 rpc_restart_call_prepare(task);
6105 break;
6106 default:
6107 task->tk_status = nfs4_async_handle_exception(task,
6108 calldata->server, task->tk_status,
6109 &exception);
6110 if (exception.retry)
6111 rpc_restart_call_prepare(task);
6112 }
6113 nfs_release_seqid(calldata->arg.seqid);
6114 }
6115
6116 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6117 {
6118 struct nfs4_unlockdata *calldata = data;
6119
6120 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6121 nfs_async_iocounter_wait(task, calldata->l_ctx))
6122 return;
6123
6124 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6125 goto out_wait;
6126 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
6127 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6128 /* Note: exit _without_ running nfs4_locku_done */
6129 goto out_no_action;
6130 }
6131 calldata->timestamp = jiffies;
6132 if (nfs4_setup_sequence(calldata->server->nfs_client,
6133 &calldata->arg.seq_args,
6134 &calldata->res.seq_res,
6135 task) != 0)
6136 nfs_release_seqid(calldata->arg.seqid);
6137 return;
6138 out_no_action:
6139 task->tk_action = NULL;
6140 out_wait:
6141 nfs4_sequence_done(task, &calldata->res.seq_res);
6142 }
6143
6144 static const struct rpc_call_ops nfs4_locku_ops = {
6145 .rpc_call_prepare = nfs4_locku_prepare,
6146 .rpc_call_done = nfs4_locku_done,
6147 .rpc_release = nfs4_locku_release_calldata,
6148 };
6149
6150 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6151 struct nfs_open_context *ctx,
6152 struct nfs4_lock_state *lsp,
6153 struct nfs_seqid *seqid)
6154 {
6155 struct nfs4_unlockdata *data;
6156 struct rpc_message msg = {
6157 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6158 .rpc_cred = ctx->cred,
6159 };
6160 struct rpc_task_setup task_setup_data = {
6161 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6162 .rpc_message = &msg,
6163 .callback_ops = &nfs4_locku_ops,
6164 .workqueue = nfsiod_workqueue,
6165 .flags = RPC_TASK_ASYNC,
6166 };
6167
6168 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6169 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6170
6171 /* Ensure this is an unlock - when canceling a lock, the
6172 * canceled lock is passed in, and it won't be an unlock.
6173 */
6174 fl->fl_type = F_UNLCK;
6175 if (fl->fl_flags & FL_CLOSE)
6176 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6177
6178 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6179 if (data == NULL) {
6180 nfs_free_seqid(seqid);
6181 return ERR_PTR(-ENOMEM);
6182 }
6183
6184 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
6185 msg.rpc_argp = &data->arg;
6186 msg.rpc_resp = &data->res;
6187 task_setup_data.callback_data = data;
6188 return rpc_run_task(&task_setup_data);
6189 }
6190
6191 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6192 {
6193 struct inode *inode = state->inode;
6194 struct nfs4_state_owner *sp = state->owner;
6195 struct nfs_inode *nfsi = NFS_I(inode);
6196 struct nfs_seqid *seqid;
6197 struct nfs4_lock_state *lsp;
6198 struct rpc_task *task;
6199 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6200 int status = 0;
6201 unsigned char fl_flags = request->fl_flags;
6202
6203 status = nfs4_set_lock_state(state, request);
6204 /* Unlock _before_ we do the RPC call */
6205 request->fl_flags |= FL_EXISTS;
6206 /* Exclude nfs_delegation_claim_locks() */
6207 mutex_lock(&sp->so_delegreturn_mutex);
6208 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
6209 down_read(&nfsi->rwsem);
6210 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
6211 up_read(&nfsi->rwsem);
6212 mutex_unlock(&sp->so_delegreturn_mutex);
6213 goto out;
6214 }
6215 up_read(&nfsi->rwsem);
6216 mutex_unlock(&sp->so_delegreturn_mutex);
6217 if (status != 0)
6218 goto out;
6219 /* Is this a delegated lock? */
6220 lsp = request->fl_u.nfs4_fl.owner;
6221 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
6222 goto out;
6223 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
6224 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
6225 status = -ENOMEM;
6226 if (IS_ERR(seqid))
6227 goto out;
6228 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
6229 status = PTR_ERR(task);
6230 if (IS_ERR(task))
6231 goto out;
6232 status = rpc_wait_for_completion_task(task);
6233 rpc_put_task(task);
6234 out:
6235 request->fl_flags = fl_flags;
6236 trace_nfs4_unlock(request, state, F_SETLK, status);
6237 return status;
6238 }
6239
6240 struct nfs4_lockdata {
6241 struct nfs_lock_args arg;
6242 struct nfs_lock_res res;
6243 struct nfs4_lock_state *lsp;
6244 struct nfs_open_context *ctx;
6245 struct file_lock fl;
6246 unsigned long timestamp;
6247 int rpc_status;
6248 int cancelled;
6249 struct nfs_server *server;
6250 };
6251
6252 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6253 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
6254 gfp_t gfp_mask)
6255 {
6256 struct nfs4_lockdata *p;
6257 struct inode *inode = lsp->ls_state->inode;
6258 struct nfs_server *server = NFS_SERVER(inode);
6259 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6260
6261 p = kzalloc(sizeof(*p), gfp_mask);
6262 if (p == NULL)
6263 return NULL;
6264
6265 p->arg.fh = NFS_FH(inode);
6266 p->arg.fl = &p->fl;
6267 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
6268 if (IS_ERR(p->arg.open_seqid))
6269 goto out_free;
6270 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
6271 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
6272 if (IS_ERR(p->arg.lock_seqid))
6273 goto out_free_seqid;
6274 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
6275 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
6276 p->arg.lock_owner.s_dev = server->s_dev;
6277 p->res.lock_seqid = p->arg.lock_seqid;
6278 p->lsp = lsp;
6279 p->server = server;
6280 refcount_inc(&lsp->ls_count);
6281 p->ctx = get_nfs_open_context(ctx);
6282 memcpy(&p->fl, fl, sizeof(p->fl));
6283 return p;
6284 out_free_seqid:
6285 nfs_free_seqid(p->arg.open_seqid);
6286 out_free:
6287 kfree(p);
6288 return NULL;
6289 }
6290
6291 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
6292 {
6293 struct nfs4_lockdata *data = calldata;
6294 struct nfs4_state *state = data->lsp->ls_state;
6295
6296 dprintk("%s: begin!\n", __func__);
6297 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
6298 goto out_wait;
6299 /* Do we need to do an open_to_lock_owner? */
6300 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
6301 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
6302 goto out_release_lock_seqid;
6303 }
6304 nfs4_stateid_copy(&data->arg.open_stateid,
6305 &state->open_stateid);
6306 data->arg.new_lock_owner = 1;
6307 data->res.open_seqid = data->arg.open_seqid;
6308 } else {
6309 data->arg.new_lock_owner = 0;
6310 nfs4_stateid_copy(&data->arg.lock_stateid,
6311 &data->lsp->ls_stateid);
6312 }
6313 if (!nfs4_valid_open_stateid(state)) {
6314 data->rpc_status = -EBADF;
6315 task->tk_action = NULL;
6316 goto out_release_open_seqid;
6317 }
6318 data->timestamp = jiffies;
6319 if (nfs4_setup_sequence(data->server->nfs_client,
6320 &data->arg.seq_args,
6321 &data->res.seq_res,
6322 task) == 0)
6323 return;
6324 out_release_open_seqid:
6325 nfs_release_seqid(data->arg.open_seqid);
6326 out_release_lock_seqid:
6327 nfs_release_seqid(data->arg.lock_seqid);
6328 out_wait:
6329 nfs4_sequence_done(task, &data->res.seq_res);
6330 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
6331 }
6332
6333 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
6334 {
6335 struct nfs4_lockdata *data = calldata;
6336 struct nfs4_lock_state *lsp = data->lsp;
6337
6338 dprintk("%s: begin!\n", __func__);
6339
6340 if (!nfs4_sequence_done(task, &data->res.seq_res))
6341 return;
6342
6343 data->rpc_status = task->tk_status;
6344 switch (task->tk_status) {
6345 case 0:
6346 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
6347 data->timestamp);
6348 if (data->arg.new_lock) {
6349 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
6350 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) {
6351 rpc_restart_call_prepare(task);
6352 break;
6353 }
6354 }
6355 if (data->arg.new_lock_owner != 0) {
6356 nfs_confirm_seqid(&lsp->ls_seqid, 0);
6357 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
6358 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6359 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
6360 rpc_restart_call_prepare(task);
6361 break;
6362 case -NFS4ERR_BAD_STATEID:
6363 case -NFS4ERR_OLD_STATEID:
6364 case -NFS4ERR_STALE_STATEID:
6365 case -NFS4ERR_EXPIRED:
6366 if (data->arg.new_lock_owner != 0) {
6367 if (!nfs4_stateid_match(&data->arg.open_stateid,
6368 &lsp->ls_state->open_stateid))
6369 rpc_restart_call_prepare(task);
6370 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
6371 &lsp->ls_stateid))
6372 rpc_restart_call_prepare(task);
6373 }
6374 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
6375 }
6376
6377 static void nfs4_lock_release(void *calldata)
6378 {
6379 struct nfs4_lockdata *data = calldata;
6380
6381 dprintk("%s: begin!\n", __func__);
6382 nfs_free_seqid(data->arg.open_seqid);
6383 if (data->cancelled) {
6384 struct rpc_task *task;
6385 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
6386 data->arg.lock_seqid);
6387 if (!IS_ERR(task))
6388 rpc_put_task_async(task);
6389 dprintk("%s: cancelling lock!\n", __func__);
6390 } else
6391 nfs_free_seqid(data->arg.lock_seqid);
6392 nfs4_put_lock_state(data->lsp);
6393 put_nfs_open_context(data->ctx);
6394 kfree(data);
6395 dprintk("%s: done!\n", __func__);
6396 }
6397
6398 static const struct rpc_call_ops nfs4_lock_ops = {
6399 .rpc_call_prepare = nfs4_lock_prepare,
6400 .rpc_call_done = nfs4_lock_done,
6401 .rpc_release = nfs4_lock_release,
6402 };
6403
6404 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
6405 {
6406 switch (error) {
6407 case -NFS4ERR_ADMIN_REVOKED:
6408 case -NFS4ERR_EXPIRED:
6409 case -NFS4ERR_BAD_STATEID:
6410 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6411 if (new_lock_owner != 0 ||
6412 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
6413 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
6414 break;
6415 case -NFS4ERR_STALE_STATEID:
6416 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6417 nfs4_schedule_lease_recovery(server->nfs_client);
6418 };
6419 }
6420
6421 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
6422 {
6423 struct nfs4_lockdata *data;
6424 struct rpc_task *task;
6425 struct rpc_message msg = {
6426 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
6427 .rpc_cred = state->owner->so_cred,
6428 };
6429 struct rpc_task_setup task_setup_data = {
6430 .rpc_client = NFS_CLIENT(state->inode),
6431 .rpc_message = &msg,
6432 .callback_ops = &nfs4_lock_ops,
6433 .workqueue = nfsiod_workqueue,
6434 .flags = RPC_TASK_ASYNC,
6435 };
6436 int ret;
6437
6438 dprintk("%s: begin!\n", __func__);
6439 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
6440 fl->fl_u.nfs4_fl.owner,
6441 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
6442 if (data == NULL)
6443 return -ENOMEM;
6444 if (IS_SETLKW(cmd))
6445 data->arg.block = 1;
6446 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
6447 msg.rpc_argp = &data->arg;
6448 msg.rpc_resp = &data->res;
6449 task_setup_data.callback_data = data;
6450 if (recovery_type > NFS_LOCK_NEW) {
6451 if (recovery_type == NFS_LOCK_RECLAIM)
6452 data->arg.reclaim = NFS_LOCK_RECLAIM;
6453 nfs4_set_sequence_privileged(&data->arg.seq_args);
6454 } else
6455 data->arg.new_lock = 1;
6456 task = rpc_run_task(&task_setup_data);
6457 if (IS_ERR(task))
6458 return PTR_ERR(task);
6459 ret = rpc_wait_for_completion_task(task);
6460 if (ret == 0) {
6461 ret = data->rpc_status;
6462 if (ret)
6463 nfs4_handle_setlk_error(data->server, data->lsp,
6464 data->arg.new_lock_owner, ret);
6465 } else
6466 data->cancelled = true;
6467 rpc_put_task(task);
6468 dprintk("%s: done, ret = %d!\n", __func__, ret);
6469 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
6470 return ret;
6471 }
6472
6473 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
6474 {
6475 struct nfs_server *server = NFS_SERVER(state->inode);
6476 struct nfs4_exception exception = {
6477 .inode = state->inode,
6478 };
6479 int err;
6480
6481 do {
6482 /* Cache the lock if possible... */
6483 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6484 return 0;
6485 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
6486 if (err != -NFS4ERR_DELAY)
6487 break;
6488 nfs4_handle_exception(server, err, &exception);
6489 } while (exception.retry);
6490 return err;
6491 }
6492
6493 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
6494 {
6495 struct nfs_server *server = NFS_SERVER(state->inode);
6496 struct nfs4_exception exception = {
6497 .inode = state->inode,
6498 };
6499 int err;
6500
6501 err = nfs4_set_lock_state(state, request);
6502 if (err != 0)
6503 return err;
6504 if (!recover_lost_locks) {
6505 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
6506 return 0;
6507 }
6508 do {
6509 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6510 return 0;
6511 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
6512 switch (err) {
6513 default:
6514 goto out;
6515 case -NFS4ERR_GRACE:
6516 case -NFS4ERR_DELAY:
6517 nfs4_handle_exception(server, err, &exception);
6518 err = 0;
6519 }
6520 } while (exception.retry);
6521 out:
6522 return err;
6523 }
6524
6525 #if defined(CONFIG_NFS_V4_1)
6526 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6527 {
6528 struct nfs4_lock_state *lsp;
6529 int status;
6530
6531 status = nfs4_set_lock_state(state, request);
6532 if (status != 0)
6533 return status;
6534 lsp = request->fl_u.nfs4_fl.owner;
6535 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
6536 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
6537 return 0;
6538 return nfs4_lock_expired(state, request);
6539 }
6540 #endif
6541
6542 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6543 {
6544 struct nfs_inode *nfsi = NFS_I(state->inode);
6545 struct nfs4_state_owner *sp = state->owner;
6546 unsigned char fl_flags = request->fl_flags;
6547 int status;
6548
6549 request->fl_flags |= FL_ACCESS;
6550 status = locks_lock_inode_wait(state->inode, request);
6551 if (status < 0)
6552 goto out;
6553 mutex_lock(&sp->so_delegreturn_mutex);
6554 down_read(&nfsi->rwsem);
6555 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6556 /* Yes: cache locks! */
6557 /* ...but avoid races with delegation recall... */
6558 request->fl_flags = fl_flags & ~FL_SLEEP;
6559 status = locks_lock_inode_wait(state->inode, request);
6560 up_read(&nfsi->rwsem);
6561 mutex_unlock(&sp->so_delegreturn_mutex);
6562 goto out;
6563 }
6564 up_read(&nfsi->rwsem);
6565 mutex_unlock(&sp->so_delegreturn_mutex);
6566 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6567 out:
6568 request->fl_flags = fl_flags;
6569 return status;
6570 }
6571
6572 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6573 {
6574 struct nfs4_exception exception = {
6575 .state = state,
6576 .inode = state->inode,
6577 };
6578 int err;
6579
6580 do {
6581 err = _nfs4_proc_setlk(state, cmd, request);
6582 if (err == -NFS4ERR_DENIED)
6583 err = -EAGAIN;
6584 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6585 err, &exception);
6586 } while (exception.retry);
6587 return err;
6588 }
6589
6590 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
6591 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
6592
6593 static int
6594 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
6595 struct file_lock *request)
6596 {
6597 int status = -ERESTARTSYS;
6598 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6599
6600 while(!signalled()) {
6601 status = nfs4_proc_setlk(state, cmd, request);
6602 if ((status != -EAGAIN) || IS_SETLK(cmd))
6603 break;
6604 freezable_schedule_timeout_interruptible(timeout);
6605 timeout *= 2;
6606 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
6607 status = -ERESTARTSYS;
6608 }
6609 return status;
6610 }
6611
6612 #ifdef CONFIG_NFS_V4_1
6613 struct nfs4_lock_waiter {
6614 struct task_struct *task;
6615 struct inode *inode;
6616 struct nfs_lowner *owner;
6617 bool notified;
6618 };
6619
6620 static int
6621 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
6622 {
6623 int ret;
6624 struct cb_notify_lock_args *cbnl = key;
6625 struct nfs4_lock_waiter *waiter = wait->private;
6626 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
6627 *wowner = waiter->owner;
6628
6629 /* Only wake if the callback was for the same owner */
6630 if (lowner->clientid != wowner->clientid ||
6631 lowner->id != wowner->id ||
6632 lowner->s_dev != wowner->s_dev)
6633 return 0;
6634
6635 /* Make sure it's for the right inode */
6636 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
6637 return 0;
6638
6639 waiter->notified = true;
6640
6641 /* override "private" so we can use default_wake_function */
6642 wait->private = waiter->task;
6643 ret = autoremove_wake_function(wait, mode, flags, key);
6644 wait->private = waiter;
6645 return ret;
6646 }
6647
6648 static int
6649 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6650 {
6651 int status = -ERESTARTSYS;
6652 unsigned long flags;
6653 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
6654 struct nfs_server *server = NFS_SERVER(state->inode);
6655 struct nfs_client *clp = server->nfs_client;
6656 wait_queue_head_t *q = &clp->cl_lock_waitq;
6657 struct nfs_lowner owner = { .clientid = clp->cl_clientid,
6658 .id = lsp->ls_seqid.owner_id,
6659 .s_dev = server->s_dev };
6660 struct nfs4_lock_waiter waiter = { .task = current,
6661 .inode = state->inode,
6662 .owner = &owner,
6663 .notified = false };
6664 wait_queue_entry_t wait;
6665
6666 /* Don't bother with waitqueue if we don't expect a callback */
6667 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
6668 return nfs4_retry_setlk_simple(state, cmd, request);
6669
6670 init_wait(&wait);
6671 wait.private = &waiter;
6672 wait.func = nfs4_wake_lock_waiter;
6673 add_wait_queue(q, &wait);
6674
6675 while(!signalled()) {
6676 status = nfs4_proc_setlk(state, cmd, request);
6677 if ((status != -EAGAIN) || IS_SETLK(cmd))
6678 break;
6679
6680 status = -ERESTARTSYS;
6681 spin_lock_irqsave(&q->lock, flags);
6682 if (waiter.notified) {
6683 spin_unlock_irqrestore(&q->lock, flags);
6684 continue;
6685 }
6686 set_current_state(TASK_INTERRUPTIBLE);
6687 spin_unlock_irqrestore(&q->lock, flags);
6688
6689 freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
6690 }
6691
6692 finish_wait(q, &wait);
6693 return status;
6694 }
6695 #else /* !CONFIG_NFS_V4_1 */
6696 static inline int
6697 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6698 {
6699 return nfs4_retry_setlk_simple(state, cmd, request);
6700 }
6701 #endif
6702
6703 static int
6704 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6705 {
6706 struct nfs_open_context *ctx;
6707 struct nfs4_state *state;
6708 int status;
6709
6710 /* verify open state */
6711 ctx = nfs_file_open_context(filp);
6712 state = ctx->state;
6713
6714 if (IS_GETLK(cmd)) {
6715 if (state != NULL)
6716 return nfs4_proc_getlk(state, F_GETLK, request);
6717 return 0;
6718 }
6719
6720 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6721 return -EINVAL;
6722
6723 if (request->fl_type == F_UNLCK) {
6724 if (state != NULL)
6725 return nfs4_proc_unlck(state, cmd, request);
6726 return 0;
6727 }
6728
6729 if (state == NULL)
6730 return -ENOLCK;
6731
6732 if ((request->fl_flags & FL_POSIX) &&
6733 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6734 return -ENOLCK;
6735
6736 /*
6737 * Don't rely on the VFS having checked the file open mode,
6738 * since it won't do this for flock() locks.
6739 */
6740 switch (request->fl_type) {
6741 case F_RDLCK:
6742 if (!(filp->f_mode & FMODE_READ))
6743 return -EBADF;
6744 break;
6745 case F_WRLCK:
6746 if (!(filp->f_mode & FMODE_WRITE))
6747 return -EBADF;
6748 }
6749
6750 status = nfs4_set_lock_state(state, request);
6751 if (status != 0)
6752 return status;
6753
6754 return nfs4_retry_setlk(state, cmd, request);
6755 }
6756
6757 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6758 {
6759 struct nfs_server *server = NFS_SERVER(state->inode);
6760 int err;
6761
6762 err = nfs4_set_lock_state(state, fl);
6763 if (err != 0)
6764 return err;
6765 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6766 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
6767 }
6768
6769 struct nfs_release_lockowner_data {
6770 struct nfs4_lock_state *lsp;
6771 struct nfs_server *server;
6772 struct nfs_release_lockowner_args args;
6773 struct nfs_release_lockowner_res res;
6774 unsigned long timestamp;
6775 };
6776
6777 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6778 {
6779 struct nfs_release_lockowner_data *data = calldata;
6780 struct nfs_server *server = data->server;
6781 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
6782 &data->res.seq_res, task);
6783 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6784 data->timestamp = jiffies;
6785 }
6786
6787 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6788 {
6789 struct nfs_release_lockowner_data *data = calldata;
6790 struct nfs_server *server = data->server;
6791
6792 nfs40_sequence_done(task, &data->res.seq_res);
6793
6794 switch (task->tk_status) {
6795 case 0:
6796 renew_lease(server, data->timestamp);
6797 break;
6798 case -NFS4ERR_STALE_CLIENTID:
6799 case -NFS4ERR_EXPIRED:
6800 nfs4_schedule_lease_recovery(server->nfs_client);
6801 break;
6802 case -NFS4ERR_LEASE_MOVED:
6803 case -NFS4ERR_DELAY:
6804 if (nfs4_async_handle_error(task, server,
6805 NULL, NULL) == -EAGAIN)
6806 rpc_restart_call_prepare(task);
6807 }
6808 }
6809
6810 static void nfs4_release_lockowner_release(void *calldata)
6811 {
6812 struct nfs_release_lockowner_data *data = calldata;
6813 nfs4_free_lock_state(data->server, data->lsp);
6814 kfree(calldata);
6815 }
6816
6817 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6818 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6819 .rpc_call_done = nfs4_release_lockowner_done,
6820 .rpc_release = nfs4_release_lockowner_release,
6821 };
6822
6823 static void
6824 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6825 {
6826 struct nfs_release_lockowner_data *data;
6827 struct rpc_message msg = {
6828 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6829 };
6830
6831 if (server->nfs_client->cl_mvops->minor_version != 0)
6832 return;
6833
6834 data = kmalloc(sizeof(*data), GFP_NOFS);
6835 if (!data)
6836 return;
6837 data->lsp = lsp;
6838 data->server = server;
6839 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6840 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6841 data->args.lock_owner.s_dev = server->s_dev;
6842
6843 msg.rpc_argp = &data->args;
6844 msg.rpc_resp = &data->res;
6845 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6846 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6847 }
6848
6849 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6850
6851 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6852 struct dentry *unused, struct inode *inode,
6853 const char *key, const void *buf,
6854 size_t buflen, int flags)
6855 {
6856 return nfs4_proc_set_acl(inode, buf, buflen);
6857 }
6858
6859 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6860 struct dentry *unused, struct inode *inode,
6861 const char *key, void *buf, size_t buflen)
6862 {
6863 return nfs4_proc_get_acl(inode, buf, buflen);
6864 }
6865
6866 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
6867 {
6868 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
6869 }
6870
6871 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6872
6873 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6874 struct dentry *unused, struct inode *inode,
6875 const char *key, const void *buf,
6876 size_t buflen, int flags)
6877 {
6878 if (security_ismaclabel(key))
6879 return nfs4_set_security_label(inode, buf, buflen);
6880
6881 return -EOPNOTSUPP;
6882 }
6883
6884 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6885 struct dentry *unused, struct inode *inode,
6886 const char *key, void *buf, size_t buflen)
6887 {
6888 if (security_ismaclabel(key))
6889 return nfs4_get_security_label(inode, buf, buflen);
6890 return -EOPNOTSUPP;
6891 }
6892
6893 static ssize_t
6894 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6895 {
6896 int len = 0;
6897
6898 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
6899 len = security_inode_listsecurity(inode, list, list_len);
6900 if (list_len && len > list_len)
6901 return -ERANGE;
6902 }
6903 return len;
6904 }
6905
6906 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6907 .prefix = XATTR_SECURITY_PREFIX,
6908 .get = nfs4_xattr_get_nfs4_label,
6909 .set = nfs4_xattr_set_nfs4_label,
6910 };
6911
6912 #else
6913
6914 static ssize_t
6915 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6916 {
6917 return 0;
6918 }
6919
6920 #endif
6921
6922 /*
6923 * nfs_fhget will use either the mounted_on_fileid or the fileid
6924 */
6925 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6926 {
6927 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6928 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6929 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6930 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6931 return;
6932
6933 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6934 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6935 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6936 fattr->nlink = 2;
6937 }
6938
6939 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6940 const struct qstr *name,
6941 struct nfs4_fs_locations *fs_locations,
6942 struct page *page)
6943 {
6944 struct nfs_server *server = NFS_SERVER(dir);
6945 u32 bitmask[3];
6946 struct nfs4_fs_locations_arg args = {
6947 .dir_fh = NFS_FH(dir),
6948 .name = name,
6949 .page = page,
6950 .bitmask = bitmask,
6951 };
6952 struct nfs4_fs_locations_res res = {
6953 .fs_locations = fs_locations,
6954 };
6955 struct rpc_message msg = {
6956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6957 .rpc_argp = &args,
6958 .rpc_resp = &res,
6959 };
6960 int status;
6961
6962 dprintk("%s: start\n", __func__);
6963
6964 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
6965 bitmask[1] = nfs4_fattr_bitmap[1];
6966
6967 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6968 * is not supported */
6969 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6970 bitmask[0] &= ~FATTR4_WORD0_FILEID;
6971 else
6972 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
6973
6974 nfs_fattr_init(&fs_locations->fattr);
6975 fs_locations->server = server;
6976 fs_locations->nlocations = 0;
6977 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6978 dprintk("%s: returned status = %d\n", __func__, status);
6979 return status;
6980 }
6981
6982 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6983 const struct qstr *name,
6984 struct nfs4_fs_locations *fs_locations,
6985 struct page *page)
6986 {
6987 struct nfs4_exception exception = { };
6988 int err;
6989 do {
6990 err = _nfs4_proc_fs_locations(client, dir, name,
6991 fs_locations, page);
6992 trace_nfs4_get_fs_locations(dir, name, err);
6993 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6994 &exception);
6995 } while (exception.retry);
6996 return err;
6997 }
6998
6999 /*
7000 * This operation also signals the server that this client is
7001 * performing migration recovery. The server can stop returning
7002 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
7003 * appended to this compound to identify the client ID which is
7004 * performing recovery.
7005 */
7006 static int _nfs40_proc_get_locations(struct inode *inode,
7007 struct nfs4_fs_locations *locations,
7008 struct page *page, struct rpc_cred *cred)
7009 {
7010 struct nfs_server *server = NFS_SERVER(inode);
7011 struct rpc_clnt *clnt = server->client;
7012 u32 bitmask[2] = {
7013 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7014 };
7015 struct nfs4_fs_locations_arg args = {
7016 .clientid = server->nfs_client->cl_clientid,
7017 .fh = NFS_FH(inode),
7018 .page = page,
7019 .bitmask = bitmask,
7020 .migration = 1, /* skip LOOKUP */
7021 .renew = 1, /* append RENEW */
7022 };
7023 struct nfs4_fs_locations_res res = {
7024 .fs_locations = locations,
7025 .migration = 1,
7026 .renew = 1,
7027 };
7028 struct rpc_message msg = {
7029 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7030 .rpc_argp = &args,
7031 .rpc_resp = &res,
7032 .rpc_cred = cred,
7033 };
7034 unsigned long now = jiffies;
7035 int status;
7036
7037 nfs_fattr_init(&locations->fattr);
7038 locations->server = server;
7039 locations->nlocations = 0;
7040
7041 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7042 nfs4_set_sequence_privileged(&args.seq_args);
7043 status = nfs4_call_sync_sequence(clnt, server, &msg,
7044 &args.seq_args, &res.seq_res);
7045 if (status)
7046 return status;
7047
7048 renew_lease(server, now);
7049 return 0;
7050 }
7051
7052 #ifdef CONFIG_NFS_V4_1
7053
7054 /*
7055 * This operation also signals the server that this client is
7056 * performing migration recovery. The server can stop asserting
7057 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
7058 * performing this operation is identified in the SEQUENCE
7059 * operation in this compound.
7060 *
7061 * When the client supports GETATTR(fs_locations_info), it can
7062 * be plumbed in here.
7063 */
7064 static int _nfs41_proc_get_locations(struct inode *inode,
7065 struct nfs4_fs_locations *locations,
7066 struct page *page, struct rpc_cred *cred)
7067 {
7068 struct nfs_server *server = NFS_SERVER(inode);
7069 struct rpc_clnt *clnt = server->client;
7070 u32 bitmask[2] = {
7071 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7072 };
7073 struct nfs4_fs_locations_arg args = {
7074 .fh = NFS_FH(inode),
7075 .page = page,
7076 .bitmask = bitmask,
7077 .migration = 1, /* skip LOOKUP */
7078 };
7079 struct nfs4_fs_locations_res res = {
7080 .fs_locations = locations,
7081 .migration = 1,
7082 };
7083 struct rpc_message msg = {
7084 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7085 .rpc_argp = &args,
7086 .rpc_resp = &res,
7087 .rpc_cred = cred,
7088 };
7089 int status;
7090
7091 nfs_fattr_init(&locations->fattr);
7092 locations->server = server;
7093 locations->nlocations = 0;
7094
7095 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7096 nfs4_set_sequence_privileged(&args.seq_args);
7097 status = nfs4_call_sync_sequence(clnt, server, &msg,
7098 &args.seq_args, &res.seq_res);
7099 if (status == NFS4_OK &&
7100 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7101 status = -NFS4ERR_LEASE_MOVED;
7102 return status;
7103 }
7104
7105 #endif /* CONFIG_NFS_V4_1 */
7106
7107 /**
7108 * nfs4_proc_get_locations - discover locations for a migrated FSID
7109 * @inode: inode on FSID that is migrating
7110 * @locations: result of query
7111 * @page: buffer
7112 * @cred: credential to use for this operation
7113 *
7114 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
7115 * operation failed, or a negative errno if a local error occurred.
7116 *
7117 * On success, "locations" is filled in, but if the server has
7118 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
7119 * asserted.
7120 *
7121 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
7122 * from this client that require migration recovery.
7123 */
7124 int nfs4_proc_get_locations(struct inode *inode,
7125 struct nfs4_fs_locations *locations,
7126 struct page *page, struct rpc_cred *cred)
7127 {
7128 struct nfs_server *server = NFS_SERVER(inode);
7129 struct nfs_client *clp = server->nfs_client;
7130 const struct nfs4_mig_recovery_ops *ops =
7131 clp->cl_mvops->mig_recovery_ops;
7132 struct nfs4_exception exception = { };
7133 int status;
7134
7135 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7136 (unsigned long long)server->fsid.major,
7137 (unsigned long long)server->fsid.minor,
7138 clp->cl_hostname);
7139 nfs_display_fhandle(NFS_FH(inode), __func__);
7140
7141 do {
7142 status = ops->get_locations(inode, locations, page, cred);
7143 if (status != -NFS4ERR_DELAY)
7144 break;
7145 nfs4_handle_exception(server, status, &exception);
7146 } while (exception.retry);
7147 return status;
7148 }
7149
7150 /*
7151 * This operation also signals the server that this client is
7152 * performing "lease moved" recovery. The server can stop
7153 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
7154 * is appended to this compound to identify the client ID which is
7155 * performing recovery.
7156 */
7157 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
7158 {
7159 struct nfs_server *server = NFS_SERVER(inode);
7160 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
7161 struct rpc_clnt *clnt = server->client;
7162 struct nfs4_fsid_present_arg args = {
7163 .fh = NFS_FH(inode),
7164 .clientid = clp->cl_clientid,
7165 .renew = 1, /* append RENEW */
7166 };
7167 struct nfs4_fsid_present_res res = {
7168 .renew = 1,
7169 };
7170 struct rpc_message msg = {
7171 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7172 .rpc_argp = &args,
7173 .rpc_resp = &res,
7174 .rpc_cred = cred,
7175 };
7176 unsigned long now = jiffies;
7177 int status;
7178
7179 res.fh = nfs_alloc_fhandle();
7180 if (res.fh == NULL)
7181 return -ENOMEM;
7182
7183 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7184 nfs4_set_sequence_privileged(&args.seq_args);
7185 status = nfs4_call_sync_sequence(clnt, server, &msg,
7186 &args.seq_args, &res.seq_res);
7187 nfs_free_fhandle(res.fh);
7188 if (status)
7189 return status;
7190
7191 do_renew_lease(clp, now);
7192 return 0;
7193 }
7194
7195 #ifdef CONFIG_NFS_V4_1
7196
7197 /*
7198 * This operation also signals the server that this client is
7199 * performing "lease moved" recovery. The server can stop asserting
7200 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
7201 * this operation is identified in the SEQUENCE operation in this
7202 * compound.
7203 */
7204 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
7205 {
7206 struct nfs_server *server = NFS_SERVER(inode);
7207 struct rpc_clnt *clnt = server->client;
7208 struct nfs4_fsid_present_arg args = {
7209 .fh = NFS_FH(inode),
7210 };
7211 struct nfs4_fsid_present_res res = {
7212 };
7213 struct rpc_message msg = {
7214 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7215 .rpc_argp = &args,
7216 .rpc_resp = &res,
7217 .rpc_cred = cred,
7218 };
7219 int status;
7220
7221 res.fh = nfs_alloc_fhandle();
7222 if (res.fh == NULL)
7223 return -ENOMEM;
7224
7225 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7226 nfs4_set_sequence_privileged(&args.seq_args);
7227 status = nfs4_call_sync_sequence(clnt, server, &msg,
7228 &args.seq_args, &res.seq_res);
7229 nfs_free_fhandle(res.fh);
7230 if (status == NFS4_OK &&
7231 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7232 status = -NFS4ERR_LEASE_MOVED;
7233 return status;
7234 }
7235
7236 #endif /* CONFIG_NFS_V4_1 */
7237
7238 /**
7239 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
7240 * @inode: inode on FSID to check
7241 * @cred: credential to use for this operation
7242 *
7243 * Server indicates whether the FSID is present, moved, or not
7244 * recognized. This operation is necessary to clear a LEASE_MOVED
7245 * condition for this client ID.
7246 *
7247 * Returns NFS4_OK if the FSID is present on this server,
7248 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
7249 * NFS4ERR code if some error occurred on the server, or a
7250 * negative errno if a local failure occurred.
7251 */
7252 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
7253 {
7254 struct nfs_server *server = NFS_SERVER(inode);
7255 struct nfs_client *clp = server->nfs_client;
7256 const struct nfs4_mig_recovery_ops *ops =
7257 clp->cl_mvops->mig_recovery_ops;
7258 struct nfs4_exception exception = { };
7259 int status;
7260
7261 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7262 (unsigned long long)server->fsid.major,
7263 (unsigned long long)server->fsid.minor,
7264 clp->cl_hostname);
7265 nfs_display_fhandle(NFS_FH(inode), __func__);
7266
7267 do {
7268 status = ops->fsid_present(inode, cred);
7269 if (status != -NFS4ERR_DELAY)
7270 break;
7271 nfs4_handle_exception(server, status, &exception);
7272 } while (exception.retry);
7273 return status;
7274 }
7275
7276 /**
7277 * If 'use_integrity' is true and the state managment nfs_client
7278 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
7279 * and the machine credential as per RFC3530bis and RFC5661 Security
7280 * Considerations sections. Otherwise, just use the user cred with the
7281 * filesystem's rpc_client.
7282 */
7283 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
7284 {
7285 int status;
7286 struct nfs4_secinfo_arg args = {
7287 .dir_fh = NFS_FH(dir),
7288 .name = name,
7289 };
7290 struct nfs4_secinfo_res res = {
7291 .flavors = flavors,
7292 };
7293 struct rpc_message msg = {
7294 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
7295 .rpc_argp = &args,
7296 .rpc_resp = &res,
7297 };
7298 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
7299 struct rpc_cred *cred = NULL;
7300
7301 if (use_integrity) {
7302 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
7303 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
7304 msg.rpc_cred = cred;
7305 }
7306
7307 dprintk("NFS call secinfo %s\n", name->name);
7308
7309 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
7310 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
7311
7312 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
7313 &res.seq_res, 0);
7314 dprintk("NFS reply secinfo: %d\n", status);
7315
7316 if (cred)
7317 put_rpccred(cred);
7318
7319 return status;
7320 }
7321
7322 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
7323 struct nfs4_secinfo_flavors *flavors)
7324 {
7325 struct nfs4_exception exception = { };
7326 int err;
7327 do {
7328 err = -NFS4ERR_WRONGSEC;
7329
7330 /* try to use integrity protection with machine cred */
7331 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
7332 err = _nfs4_proc_secinfo(dir, name, flavors, true);
7333
7334 /*
7335 * if unable to use integrity protection, or SECINFO with
7336 * integrity protection returns NFS4ERR_WRONGSEC (which is
7337 * disallowed by spec, but exists in deployed servers) use
7338 * the current filesystem's rpc_client and the user cred.
7339 */
7340 if (err == -NFS4ERR_WRONGSEC)
7341 err = _nfs4_proc_secinfo(dir, name, flavors, false);
7342
7343 trace_nfs4_secinfo(dir, name, err);
7344 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7345 &exception);
7346 } while (exception.retry);
7347 return err;
7348 }
7349
7350 #ifdef CONFIG_NFS_V4_1
7351 /*
7352 * Check the exchange flags returned by the server for invalid flags, having
7353 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
7354 * DS flags set.
7355 */
7356 static int nfs4_check_cl_exchange_flags(u32 flags)
7357 {
7358 if (flags & ~EXCHGID4_FLAG_MASK_R)
7359 goto out_inval;
7360 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
7361 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
7362 goto out_inval;
7363 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
7364 goto out_inval;
7365 return NFS_OK;
7366 out_inval:
7367 return -NFS4ERR_INVAL;
7368 }
7369
7370 static bool
7371 nfs41_same_server_scope(struct nfs41_server_scope *a,
7372 struct nfs41_server_scope *b)
7373 {
7374 if (a->server_scope_sz != b->server_scope_sz)
7375 return false;
7376 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
7377 }
7378
7379 static void
7380 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
7381 {
7382 }
7383
7384 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
7385 .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
7386 };
7387
7388 /*
7389 * nfs4_proc_bind_one_conn_to_session()
7390 *
7391 * The 4.1 client currently uses the same TCP connection for the
7392 * fore and backchannel.
7393 */
7394 static
7395 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
7396 struct rpc_xprt *xprt,
7397 struct nfs_client *clp,
7398 struct rpc_cred *cred)
7399 {
7400 int status;
7401 struct nfs41_bind_conn_to_session_args args = {
7402 .client = clp,
7403 .dir = NFS4_CDFC4_FORE_OR_BOTH,
7404 };
7405 struct nfs41_bind_conn_to_session_res res;
7406 struct rpc_message msg = {
7407 .rpc_proc =
7408 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
7409 .rpc_argp = &args,
7410 .rpc_resp = &res,
7411 .rpc_cred = cred,
7412 };
7413 struct rpc_task_setup task_setup_data = {
7414 .rpc_client = clnt,
7415 .rpc_xprt = xprt,
7416 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
7417 .rpc_message = &msg,
7418 .flags = RPC_TASK_TIMEOUT,
7419 };
7420 struct rpc_task *task;
7421
7422 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
7423 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
7424 args.dir = NFS4_CDFC4_FORE;
7425
7426 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
7427 if (xprt != rcu_access_pointer(clnt->cl_xprt))
7428 args.dir = NFS4_CDFC4_FORE;
7429
7430 task = rpc_run_task(&task_setup_data);
7431 if (!IS_ERR(task)) {
7432 status = task->tk_status;
7433 rpc_put_task(task);
7434 } else
7435 status = PTR_ERR(task);
7436 trace_nfs4_bind_conn_to_session(clp, status);
7437 if (status == 0) {
7438 if (memcmp(res.sessionid.data,
7439 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
7440 dprintk("NFS: %s: Session ID mismatch\n", __func__);
7441 return -EIO;
7442 }
7443 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
7444 dprintk("NFS: %s: Unexpected direction from server\n",
7445 __func__);
7446 return -EIO;
7447 }
7448 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
7449 dprintk("NFS: %s: Server returned RDMA mode = true\n",
7450 __func__);
7451 return -EIO;
7452 }
7453 }
7454
7455 return status;
7456 }
7457
7458 struct rpc_bind_conn_calldata {
7459 struct nfs_client *clp;
7460 struct rpc_cred *cred;
7461 };
7462
7463 static int
7464 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
7465 struct rpc_xprt *xprt,
7466 void *calldata)
7467 {
7468 struct rpc_bind_conn_calldata *p = calldata;
7469
7470 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
7471 }
7472
7473 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
7474 {
7475 struct rpc_bind_conn_calldata data = {
7476 .clp = clp,
7477 .cred = cred,
7478 };
7479 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
7480 nfs4_proc_bind_conn_to_session_callback, &data);
7481 }
7482
7483 /*
7484 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
7485 * and operations we'd like to see to enable certain features in the allow map
7486 */
7487 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
7488 .how = SP4_MACH_CRED,
7489 .enforce.u.words = {
7490 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
7491 1 << (OP_EXCHANGE_ID - 32) |
7492 1 << (OP_CREATE_SESSION - 32) |
7493 1 << (OP_DESTROY_SESSION - 32) |
7494 1 << (OP_DESTROY_CLIENTID - 32)
7495 },
7496 .allow.u.words = {
7497 [0] = 1 << (OP_CLOSE) |
7498 1 << (OP_OPEN_DOWNGRADE) |
7499 1 << (OP_LOCKU) |
7500 1 << (OP_DELEGRETURN) |
7501 1 << (OP_COMMIT),
7502 [1] = 1 << (OP_SECINFO - 32) |
7503 1 << (OP_SECINFO_NO_NAME - 32) |
7504 1 << (OP_LAYOUTRETURN - 32) |
7505 1 << (OP_TEST_STATEID - 32) |
7506 1 << (OP_FREE_STATEID - 32) |
7507 1 << (OP_WRITE - 32)
7508 }
7509 };
7510
7511 /*
7512 * Select the state protection mode for client `clp' given the server results
7513 * from exchange_id in `sp'.
7514 *
7515 * Returns 0 on success, negative errno otherwise.
7516 */
7517 static int nfs4_sp4_select_mode(struct nfs_client *clp,
7518 struct nfs41_state_protection *sp)
7519 {
7520 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
7521 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
7522 1 << (OP_EXCHANGE_ID - 32) |
7523 1 << (OP_CREATE_SESSION - 32) |
7524 1 << (OP_DESTROY_SESSION - 32) |
7525 1 << (OP_DESTROY_CLIENTID - 32)
7526 };
7527 unsigned long flags = 0;
7528 unsigned int i;
7529 int ret = 0;
7530
7531 if (sp->how == SP4_MACH_CRED) {
7532 /* Print state protect result */
7533 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
7534 for (i = 0; i <= LAST_NFS4_OP; i++) {
7535 if (test_bit(i, sp->enforce.u.longs))
7536 dfprintk(MOUNT, " enforce op %d\n", i);
7537 if (test_bit(i, sp->allow.u.longs))
7538 dfprintk(MOUNT, " allow op %d\n", i);
7539 }
7540
7541 /* make sure nothing is on enforce list that isn't supported */
7542 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
7543 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
7544 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
7545 ret = -EINVAL;
7546 goto out;
7547 }
7548 }
7549
7550 /*
7551 * Minimal mode - state operations are allowed to use machine
7552 * credential. Note this already happens by default, so the
7553 * client doesn't have to do anything more than the negotiation.
7554 *
7555 * NOTE: we don't care if EXCHANGE_ID is in the list -
7556 * we're already using the machine cred for exchange_id
7557 * and will never use a different cred.
7558 */
7559 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
7560 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
7561 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
7562 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
7563 dfprintk(MOUNT, "sp4_mach_cred:\n");
7564 dfprintk(MOUNT, " minimal mode enabled\n");
7565 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
7566 } else {
7567 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
7568 ret = -EINVAL;
7569 goto out;
7570 }
7571
7572 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
7573 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
7574 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
7575 test_bit(OP_LOCKU, sp->allow.u.longs)) {
7576 dfprintk(MOUNT, " cleanup mode enabled\n");
7577 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
7578 }
7579
7580 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
7581 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
7582 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
7583 }
7584
7585 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
7586 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
7587 dfprintk(MOUNT, " secinfo mode enabled\n");
7588 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
7589 }
7590
7591 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
7592 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
7593 dfprintk(MOUNT, " stateid mode enabled\n");
7594 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
7595 }
7596
7597 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
7598 dfprintk(MOUNT, " write mode enabled\n");
7599 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
7600 }
7601
7602 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
7603 dfprintk(MOUNT, " commit mode enabled\n");
7604 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
7605 }
7606 }
7607 out:
7608 clp->cl_sp4_flags = flags;
7609 return 0;
7610 }
7611
7612 struct nfs41_exchange_id_data {
7613 struct nfs41_exchange_id_res res;
7614 struct nfs41_exchange_id_args args;
7615 };
7616
7617 static void nfs4_exchange_id_release(void *data)
7618 {
7619 struct nfs41_exchange_id_data *cdata =
7620 (struct nfs41_exchange_id_data *)data;
7621
7622 nfs_put_client(cdata->args.client);
7623 kfree(cdata->res.impl_id);
7624 kfree(cdata->res.server_scope);
7625 kfree(cdata->res.server_owner);
7626 kfree(cdata);
7627 }
7628
7629 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
7630 .rpc_release = nfs4_exchange_id_release,
7631 };
7632
7633 /*
7634 * _nfs4_proc_exchange_id()
7635 *
7636 * Wrapper for EXCHANGE_ID operation.
7637 */
7638 static struct rpc_task *
7639 nfs4_run_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7640 u32 sp4_how, struct rpc_xprt *xprt)
7641 {
7642 struct rpc_message msg = {
7643 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
7644 .rpc_cred = cred,
7645 };
7646 struct rpc_task_setup task_setup_data = {
7647 .rpc_client = clp->cl_rpcclient,
7648 .callback_ops = &nfs4_exchange_id_call_ops,
7649 .rpc_message = &msg,
7650 .flags = RPC_TASK_TIMEOUT,
7651 };
7652 struct nfs41_exchange_id_data *calldata;
7653 int status;
7654
7655 if (!refcount_inc_not_zero(&clp->cl_count))
7656 return ERR_PTR(-EIO);
7657
7658 status = -ENOMEM;
7659 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7660 if (!calldata)
7661 goto out;
7662
7663 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
7664
7665 status = nfs4_init_uniform_client_string(clp);
7666 if (status)
7667 goto out_calldata;
7668
7669 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7670 GFP_NOFS);
7671 status = -ENOMEM;
7672 if (unlikely(calldata->res.server_owner == NULL))
7673 goto out_calldata;
7674
7675 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7676 GFP_NOFS);
7677 if (unlikely(calldata->res.server_scope == NULL))
7678 goto out_server_owner;
7679
7680 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7681 if (unlikely(calldata->res.impl_id == NULL))
7682 goto out_server_scope;
7683
7684 switch (sp4_how) {
7685 case SP4_NONE:
7686 calldata->args.state_protect.how = SP4_NONE;
7687 break;
7688
7689 case SP4_MACH_CRED:
7690 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
7691 break;
7692
7693 default:
7694 /* unsupported! */
7695 WARN_ON_ONCE(1);
7696 status = -EINVAL;
7697 goto out_impl_id;
7698 }
7699 if (xprt) {
7700 task_setup_data.rpc_xprt = xprt;
7701 task_setup_data.flags |= RPC_TASK_SOFTCONN;
7702 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
7703 sizeof(calldata->args.verifier.data));
7704 }
7705 calldata->args.client = clp;
7706 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7707 EXCHGID4_FLAG_BIND_PRINC_STATEID;
7708 #ifdef CONFIG_NFS_V4_1_MIGRATION
7709 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
7710 #endif
7711 msg.rpc_argp = &calldata->args;
7712 msg.rpc_resp = &calldata->res;
7713 task_setup_data.callback_data = calldata;
7714
7715 return rpc_run_task(&task_setup_data);
7716
7717 out_impl_id:
7718 kfree(calldata->res.impl_id);
7719 out_server_scope:
7720 kfree(calldata->res.server_scope);
7721 out_server_owner:
7722 kfree(calldata->res.server_owner);
7723 out_calldata:
7724 kfree(calldata);
7725 out:
7726 nfs_put_client(clp);
7727 return ERR_PTR(status);
7728 }
7729
7730 /*
7731 * _nfs4_proc_exchange_id()
7732 *
7733 * Wrapper for EXCHANGE_ID operation.
7734 */
7735 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7736 u32 sp4_how)
7737 {
7738 struct rpc_task *task;
7739 struct nfs41_exchange_id_args *argp;
7740 struct nfs41_exchange_id_res *resp;
7741 int status;
7742
7743 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
7744 if (IS_ERR(task))
7745 return PTR_ERR(task);
7746
7747 argp = task->tk_msg.rpc_argp;
7748 resp = task->tk_msg.rpc_resp;
7749 status = task->tk_status;
7750 if (status != 0)
7751 goto out;
7752
7753 status = nfs4_check_cl_exchange_flags(resp->flags);
7754 if (status != 0)
7755 goto out;
7756
7757 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
7758 if (status != 0)
7759 goto out;
7760
7761 clp->cl_clientid = resp->clientid;
7762 clp->cl_exchange_flags = resp->flags;
7763 clp->cl_seqid = resp->seqid;
7764 /* Client ID is not confirmed */
7765 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
7766 clear_bit(NFS4_SESSION_ESTABLISHED,
7767 &clp->cl_session->session_state);
7768
7769 if (clp->cl_serverscope != NULL &&
7770 !nfs41_same_server_scope(clp->cl_serverscope,
7771 resp->server_scope)) {
7772 dprintk("%s: server_scope mismatch detected\n",
7773 __func__);
7774 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7775 }
7776
7777 swap(clp->cl_serverowner, resp->server_owner);
7778 swap(clp->cl_serverscope, resp->server_scope);
7779 swap(clp->cl_implid, resp->impl_id);
7780
7781 /* Save the EXCHANGE_ID verifier session trunk tests */
7782 memcpy(clp->cl_confirm.data, argp->verifier.data,
7783 sizeof(clp->cl_confirm.data));
7784 out:
7785 trace_nfs4_exchange_id(clp, status);
7786 rpc_put_task(task);
7787 return status;
7788 }
7789
7790 /*
7791 * nfs4_proc_exchange_id()
7792 *
7793 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7794 *
7795 * Since the clientid has expired, all compounds using sessions
7796 * associated with the stale clientid will be returning
7797 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7798 * be in some phase of session reset.
7799 *
7800 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7801 */
7802 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7803 {
7804 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7805 int status;
7806
7807 /* try SP4_MACH_CRED if krb5i/p */
7808 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7809 authflavor == RPC_AUTH_GSS_KRB5P) {
7810 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7811 if (!status)
7812 return 0;
7813 }
7814
7815 /* try SP4_NONE */
7816 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7817 }
7818
7819 /**
7820 * nfs4_test_session_trunk
7821 *
7822 * This is an add_xprt_test() test function called from
7823 * rpc_clnt_setup_test_and_add_xprt.
7824 *
7825 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt
7826 * and is dereferrenced in nfs4_exchange_id_release
7827 *
7828 * Upon success, add the new transport to the rpc_clnt
7829 *
7830 * @clnt: struct rpc_clnt to get new transport
7831 * @xprt: the rpc_xprt to test
7832 * @data: call data for _nfs4_proc_exchange_id.
7833 */
7834 int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
7835 void *data)
7836 {
7837 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
7838 struct rpc_task *task;
7839 int status;
7840
7841 u32 sp4_how;
7842
7843 dprintk("--> %s try %s\n", __func__,
7844 xprt->address_strings[RPC_DISPLAY_ADDR]);
7845
7846 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
7847
7848 /* Test connection for session trunking. Async exchange_id call */
7849 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
7850 if (IS_ERR(task))
7851 return PTR_ERR(task);
7852
7853 status = task->tk_status;
7854 if (status == 0)
7855 status = nfs4_detect_session_trunking(adata->clp,
7856 task->tk_msg.rpc_resp, xprt);
7857
7858 rpc_put_task(task);
7859 return status;
7860 }
7861 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
7862
7863 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7864 struct rpc_cred *cred)
7865 {
7866 struct rpc_message msg = {
7867 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7868 .rpc_argp = clp,
7869 .rpc_cred = cred,
7870 };
7871 int status;
7872
7873 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7874 trace_nfs4_destroy_clientid(clp, status);
7875 if (status)
7876 dprintk("NFS: Got error %d from the server %s on "
7877 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7878 return status;
7879 }
7880
7881 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7882 struct rpc_cred *cred)
7883 {
7884 unsigned int loop;
7885 int ret;
7886
7887 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7888 ret = _nfs4_proc_destroy_clientid(clp, cred);
7889 switch (ret) {
7890 case -NFS4ERR_DELAY:
7891 case -NFS4ERR_CLIENTID_BUSY:
7892 ssleep(1);
7893 break;
7894 default:
7895 return ret;
7896 }
7897 }
7898 return 0;
7899 }
7900
7901 int nfs4_destroy_clientid(struct nfs_client *clp)
7902 {
7903 struct rpc_cred *cred;
7904 int ret = 0;
7905
7906 if (clp->cl_mvops->minor_version < 1)
7907 goto out;
7908 if (clp->cl_exchange_flags == 0)
7909 goto out;
7910 if (clp->cl_preserve_clid)
7911 goto out;
7912 cred = nfs4_get_clid_cred(clp);
7913 ret = nfs4_proc_destroy_clientid(clp, cred);
7914 if (cred)
7915 put_rpccred(cred);
7916 switch (ret) {
7917 case 0:
7918 case -NFS4ERR_STALE_CLIENTID:
7919 clp->cl_exchange_flags = 0;
7920 }
7921 out:
7922 return ret;
7923 }
7924
7925 struct nfs4_get_lease_time_data {
7926 struct nfs4_get_lease_time_args *args;
7927 struct nfs4_get_lease_time_res *res;
7928 struct nfs_client *clp;
7929 };
7930
7931 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7932 void *calldata)
7933 {
7934 struct nfs4_get_lease_time_data *data =
7935 (struct nfs4_get_lease_time_data *)calldata;
7936
7937 dprintk("--> %s\n", __func__);
7938 /* just setup sequence, do not trigger session recovery
7939 since we're invoked within one */
7940 nfs4_setup_sequence(data->clp,
7941 &data->args->la_seq_args,
7942 &data->res->lr_seq_res,
7943 task);
7944 dprintk("<-- %s\n", __func__);
7945 }
7946
7947 /*
7948 * Called from nfs4_state_manager thread for session setup, so don't recover
7949 * from sequence operation or clientid errors.
7950 */
7951 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7952 {
7953 struct nfs4_get_lease_time_data *data =
7954 (struct nfs4_get_lease_time_data *)calldata;
7955
7956 dprintk("--> %s\n", __func__);
7957 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7958 return;
7959 switch (task->tk_status) {
7960 case -NFS4ERR_DELAY:
7961 case -NFS4ERR_GRACE:
7962 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7963 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7964 task->tk_status = 0;
7965 /* fall through */
7966 case -NFS4ERR_RETRY_UNCACHED_REP:
7967 rpc_restart_call_prepare(task);
7968 return;
7969 }
7970 dprintk("<-- %s\n", __func__);
7971 }
7972
7973 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7974 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7975 .rpc_call_done = nfs4_get_lease_time_done,
7976 };
7977
7978 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7979 {
7980 struct rpc_task *task;
7981 struct nfs4_get_lease_time_args args;
7982 struct nfs4_get_lease_time_res res = {
7983 .lr_fsinfo = fsinfo,
7984 };
7985 struct nfs4_get_lease_time_data data = {
7986 .args = &args,
7987 .res = &res,
7988 .clp = clp,
7989 };
7990 struct rpc_message msg = {
7991 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7992 .rpc_argp = &args,
7993 .rpc_resp = &res,
7994 };
7995 struct rpc_task_setup task_setup = {
7996 .rpc_client = clp->cl_rpcclient,
7997 .rpc_message = &msg,
7998 .callback_ops = &nfs4_get_lease_time_ops,
7999 .callback_data = &data,
8000 .flags = RPC_TASK_TIMEOUT,
8001 };
8002 int status;
8003
8004 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
8005 nfs4_set_sequence_privileged(&args.la_seq_args);
8006 task = rpc_run_task(&task_setup);
8007
8008 if (IS_ERR(task))
8009 return PTR_ERR(task);
8010
8011 status = task->tk_status;
8012 rpc_put_task(task);
8013 return status;
8014 }
8015
8016 /*
8017 * Initialize the values to be used by the client in CREATE_SESSION
8018 * If nfs4_init_session set the fore channel request and response sizes,
8019 * use them.
8020 *
8021 * Set the back channel max_resp_sz_cached to zero to force the client to
8022 * always set csa_cachethis to FALSE because the current implementation
8023 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
8024 */
8025 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
8026 struct rpc_clnt *clnt)
8027 {
8028 unsigned int max_rqst_sz, max_resp_sz;
8029 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
8030
8031 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
8032 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
8033
8034 /* Fore channel attributes */
8035 args->fc_attrs.max_rqst_sz = max_rqst_sz;
8036 args->fc_attrs.max_resp_sz = max_resp_sz;
8037 args->fc_attrs.max_ops = NFS4_MAX_OPS;
8038 args->fc_attrs.max_reqs = max_session_slots;
8039
8040 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
8041 "max_ops=%u max_reqs=%u\n",
8042 __func__,
8043 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
8044 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
8045
8046 /* Back channel attributes */
8047 args->bc_attrs.max_rqst_sz = max_bc_payload;
8048 args->bc_attrs.max_resp_sz = max_bc_payload;
8049 args->bc_attrs.max_resp_sz_cached = 0;
8050 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
8051 args->bc_attrs.max_reqs = min_t(unsigned short, max_session_cb_slots, 1);
8052
8053 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
8054 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
8055 __func__,
8056 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
8057 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
8058 args->bc_attrs.max_reqs);
8059 }
8060
8061 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
8062 struct nfs41_create_session_res *res)
8063 {
8064 struct nfs4_channel_attrs *sent = &args->fc_attrs;
8065 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
8066
8067 if (rcvd->max_resp_sz > sent->max_resp_sz)
8068 return -EINVAL;
8069 /*
8070 * Our requested max_ops is the minimum we need; we're not
8071 * prepared to break up compounds into smaller pieces than that.
8072 * So, no point even trying to continue if the server won't
8073 * cooperate:
8074 */
8075 if (rcvd->max_ops < sent->max_ops)
8076 return -EINVAL;
8077 if (rcvd->max_reqs == 0)
8078 return -EINVAL;
8079 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
8080 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
8081 return 0;
8082 }
8083
8084 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
8085 struct nfs41_create_session_res *res)
8086 {
8087 struct nfs4_channel_attrs *sent = &args->bc_attrs;
8088 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
8089
8090 if (!(res->flags & SESSION4_BACK_CHAN))
8091 goto out;
8092 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
8093 return -EINVAL;
8094 if (rcvd->max_resp_sz < sent->max_resp_sz)
8095 return -EINVAL;
8096 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
8097 return -EINVAL;
8098 if (rcvd->max_ops > sent->max_ops)
8099 return -EINVAL;
8100 if (rcvd->max_reqs > sent->max_reqs)
8101 return -EINVAL;
8102 out:
8103 return 0;
8104 }
8105
8106 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
8107 struct nfs41_create_session_res *res)
8108 {
8109 int ret;
8110
8111 ret = nfs4_verify_fore_channel_attrs(args, res);
8112 if (ret)
8113 return ret;
8114 return nfs4_verify_back_channel_attrs(args, res);
8115 }
8116
8117 static void nfs4_update_session(struct nfs4_session *session,
8118 struct nfs41_create_session_res *res)
8119 {
8120 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
8121 /* Mark client id and session as being confirmed */
8122 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
8123 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
8124 session->flags = res->flags;
8125 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
8126 if (res->flags & SESSION4_BACK_CHAN)
8127 memcpy(&session->bc_attrs, &res->bc_attrs,
8128 sizeof(session->bc_attrs));
8129 }
8130
8131 static int _nfs4_proc_create_session(struct nfs_client *clp,
8132 struct rpc_cred *cred)
8133 {
8134 struct nfs4_session *session = clp->cl_session;
8135 struct nfs41_create_session_args args = {
8136 .client = clp,
8137 .clientid = clp->cl_clientid,
8138 .seqid = clp->cl_seqid,
8139 .cb_program = NFS4_CALLBACK,
8140 };
8141 struct nfs41_create_session_res res;
8142
8143 struct rpc_message msg = {
8144 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
8145 .rpc_argp = &args,
8146 .rpc_resp = &res,
8147 .rpc_cred = cred,
8148 };
8149 int status;
8150
8151 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
8152 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
8153
8154 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
8155 trace_nfs4_create_session(clp, status);
8156
8157 switch (status) {
8158 case -NFS4ERR_STALE_CLIENTID:
8159 case -NFS4ERR_DELAY:
8160 case -ETIMEDOUT:
8161 case -EACCES:
8162 case -EAGAIN:
8163 goto out;
8164 };
8165
8166 clp->cl_seqid++;
8167 if (!status) {
8168 /* Verify the session's negotiated channel_attrs values */
8169 status = nfs4_verify_channel_attrs(&args, &res);
8170 /* Increment the clientid slot sequence id */
8171 if (status)
8172 goto out;
8173 nfs4_update_session(session, &res);
8174 }
8175 out:
8176 return status;
8177 }
8178
8179 /*
8180 * Issues a CREATE_SESSION operation to the server.
8181 * It is the responsibility of the caller to verify the session is
8182 * expired before calling this routine.
8183 */
8184 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
8185 {
8186 int status;
8187 unsigned *ptr;
8188 struct nfs4_session *session = clp->cl_session;
8189
8190 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
8191
8192 status = _nfs4_proc_create_session(clp, cred);
8193 if (status)
8194 goto out;
8195
8196 /* Init or reset the session slot tables */
8197 status = nfs4_setup_session_slot_tables(session);
8198 dprintk("slot table setup returned %d\n", status);
8199 if (status)
8200 goto out;
8201
8202 ptr = (unsigned *)&session->sess_id.data[0];
8203 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
8204 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
8205 out:
8206 dprintk("<-- %s\n", __func__);
8207 return status;
8208 }
8209
8210 /*
8211 * Issue the over-the-wire RPC DESTROY_SESSION.
8212 * The caller must serialize access to this routine.
8213 */
8214 int nfs4_proc_destroy_session(struct nfs4_session *session,
8215 struct rpc_cred *cred)
8216 {
8217 struct rpc_message msg = {
8218 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
8219 .rpc_argp = session,
8220 .rpc_cred = cred,
8221 };
8222 int status = 0;
8223
8224 dprintk("--> nfs4_proc_destroy_session\n");
8225
8226 /* session is still being setup */
8227 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
8228 return 0;
8229
8230 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
8231 trace_nfs4_destroy_session(session->clp, status);
8232
8233 if (status)
8234 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
8235 "Session has been destroyed regardless...\n", status);
8236
8237 dprintk("<-- nfs4_proc_destroy_session\n");
8238 return status;
8239 }
8240
8241 /*
8242 * Renew the cl_session lease.
8243 */
8244 struct nfs4_sequence_data {
8245 struct nfs_client *clp;
8246 struct nfs4_sequence_args args;
8247 struct nfs4_sequence_res res;
8248 };
8249
8250 static void nfs41_sequence_release(void *data)
8251 {
8252 struct nfs4_sequence_data *calldata = data;
8253 struct nfs_client *clp = calldata->clp;
8254
8255 if (refcount_read(&clp->cl_count) > 1)
8256 nfs4_schedule_state_renewal(clp);
8257 nfs_put_client(clp);
8258 kfree(calldata);
8259 }
8260
8261 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8262 {
8263 switch(task->tk_status) {
8264 case -NFS4ERR_DELAY:
8265 rpc_delay(task, NFS4_POLL_RETRY_MAX);
8266 return -EAGAIN;
8267 default:
8268 nfs4_schedule_lease_recovery(clp);
8269 }
8270 return 0;
8271 }
8272
8273 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
8274 {
8275 struct nfs4_sequence_data *calldata = data;
8276 struct nfs_client *clp = calldata->clp;
8277
8278 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
8279 return;
8280
8281 trace_nfs4_sequence(clp, task->tk_status);
8282 if (task->tk_status < 0) {
8283 dprintk("%s ERROR %d\n", __func__, task->tk_status);
8284 if (refcount_read(&clp->cl_count) == 1)
8285 goto out;
8286
8287 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
8288 rpc_restart_call_prepare(task);
8289 return;
8290 }
8291 }
8292 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
8293 out:
8294 dprintk("<-- %s\n", __func__);
8295 }
8296
8297 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
8298 {
8299 struct nfs4_sequence_data *calldata = data;
8300 struct nfs_client *clp = calldata->clp;
8301 struct nfs4_sequence_args *args;
8302 struct nfs4_sequence_res *res;
8303
8304 args = task->tk_msg.rpc_argp;
8305 res = task->tk_msg.rpc_resp;
8306
8307 nfs4_setup_sequence(clp, args, res, task);
8308 }
8309
8310 static const struct rpc_call_ops nfs41_sequence_ops = {
8311 .rpc_call_done = nfs41_sequence_call_done,
8312 .rpc_call_prepare = nfs41_sequence_prepare,
8313 .rpc_release = nfs41_sequence_release,
8314 };
8315
8316 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
8317 struct rpc_cred *cred,
8318 struct nfs4_slot *slot,
8319 bool is_privileged)
8320 {
8321 struct nfs4_sequence_data *calldata;
8322 struct rpc_message msg = {
8323 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
8324 .rpc_cred = cred,
8325 };
8326 struct rpc_task_setup task_setup_data = {
8327 .rpc_client = clp->cl_rpcclient,
8328 .rpc_message = &msg,
8329 .callback_ops = &nfs41_sequence_ops,
8330 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
8331 };
8332 struct rpc_task *ret;
8333
8334 ret = ERR_PTR(-EIO);
8335 if (!refcount_inc_not_zero(&clp->cl_count))
8336 goto out_err;
8337
8338 ret = ERR_PTR(-ENOMEM);
8339 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8340 if (calldata == NULL)
8341 goto out_put_clp;
8342 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
8343 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
8344 if (is_privileged)
8345 nfs4_set_sequence_privileged(&calldata->args);
8346 msg.rpc_argp = &calldata->args;
8347 msg.rpc_resp = &calldata->res;
8348 calldata->clp = clp;
8349 task_setup_data.callback_data = calldata;
8350
8351 ret = rpc_run_task(&task_setup_data);
8352 if (IS_ERR(ret))
8353 goto out_err;
8354 return ret;
8355 out_put_clp:
8356 nfs_put_client(clp);
8357 out_err:
8358 nfs41_release_slot(slot);
8359 return ret;
8360 }
8361
8362 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
8363 {
8364 struct rpc_task *task;
8365 int ret = 0;
8366
8367 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
8368 return -EAGAIN;
8369 task = _nfs41_proc_sequence(clp, cred, NULL, false);
8370 if (IS_ERR(task))
8371 ret = PTR_ERR(task);
8372 else
8373 rpc_put_task_async(task);
8374 dprintk("<-- %s status=%d\n", __func__, ret);
8375 return ret;
8376 }
8377
8378 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
8379 {
8380 struct rpc_task *task;
8381 int ret;
8382
8383 task = _nfs41_proc_sequence(clp, cred, NULL, true);
8384 if (IS_ERR(task)) {
8385 ret = PTR_ERR(task);
8386 goto out;
8387 }
8388 ret = rpc_wait_for_completion_task(task);
8389 if (!ret)
8390 ret = task->tk_status;
8391 rpc_put_task(task);
8392 out:
8393 dprintk("<-- %s status=%d\n", __func__, ret);
8394 return ret;
8395 }
8396
8397 struct nfs4_reclaim_complete_data {
8398 struct nfs_client *clp;
8399 struct nfs41_reclaim_complete_args arg;
8400 struct nfs41_reclaim_complete_res res;
8401 };
8402
8403 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
8404 {
8405 struct nfs4_reclaim_complete_data *calldata = data;
8406
8407 nfs4_setup_sequence(calldata->clp,
8408 &calldata->arg.seq_args,
8409 &calldata->res.seq_res,
8410 task);
8411 }
8412
8413 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8414 {
8415 switch(task->tk_status) {
8416 case 0:
8417 case -NFS4ERR_COMPLETE_ALREADY:
8418 case -NFS4ERR_WRONG_CRED: /* What to do here? */
8419 break;
8420 case -NFS4ERR_DELAY:
8421 rpc_delay(task, NFS4_POLL_RETRY_MAX);
8422 /* fall through */
8423 case -NFS4ERR_RETRY_UNCACHED_REP:
8424 return -EAGAIN;
8425 case -NFS4ERR_BADSESSION:
8426 case -NFS4ERR_DEADSESSION:
8427 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
8428 nfs4_schedule_session_recovery(clp->cl_session,
8429 task->tk_status);
8430 break;
8431 default:
8432 nfs4_schedule_lease_recovery(clp);
8433 }
8434 return 0;
8435 }
8436
8437 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
8438 {
8439 struct nfs4_reclaim_complete_data *calldata = data;
8440 struct nfs_client *clp = calldata->clp;
8441 struct nfs4_sequence_res *res = &calldata->res.seq_res;
8442
8443 dprintk("--> %s\n", __func__);
8444 if (!nfs41_sequence_done(task, res))
8445 return;
8446
8447 trace_nfs4_reclaim_complete(clp, task->tk_status);
8448 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
8449 rpc_restart_call_prepare(task);
8450 return;
8451 }
8452 dprintk("<-- %s\n", __func__);
8453 }
8454
8455 static void nfs4_free_reclaim_complete_data(void *data)
8456 {
8457 struct nfs4_reclaim_complete_data *calldata = data;
8458
8459 kfree(calldata);
8460 }
8461
8462 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
8463 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
8464 .rpc_call_done = nfs4_reclaim_complete_done,
8465 .rpc_release = nfs4_free_reclaim_complete_data,
8466 };
8467
8468 /*
8469 * Issue a global reclaim complete.
8470 */
8471 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
8472 struct rpc_cred *cred)
8473 {
8474 struct nfs4_reclaim_complete_data *calldata;
8475 struct rpc_task *task;
8476 struct rpc_message msg = {
8477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
8478 .rpc_cred = cred,
8479 };
8480 struct rpc_task_setup task_setup_data = {
8481 .rpc_client = clp->cl_rpcclient,
8482 .rpc_message = &msg,
8483 .callback_ops = &nfs4_reclaim_complete_call_ops,
8484 .flags = RPC_TASK_ASYNC,
8485 };
8486 int status = -ENOMEM;
8487
8488 dprintk("--> %s\n", __func__);
8489 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8490 if (calldata == NULL)
8491 goto out;
8492 calldata->clp = clp;
8493 calldata->arg.one_fs = 0;
8494
8495 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
8496 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
8497 msg.rpc_argp = &calldata->arg;
8498 msg.rpc_resp = &calldata->res;
8499 task_setup_data.callback_data = calldata;
8500 task = rpc_run_task(&task_setup_data);
8501 if (IS_ERR(task)) {
8502 status = PTR_ERR(task);
8503 goto out;
8504 }
8505 status = rpc_wait_for_completion_task(task);
8506 if (status == 0)
8507 status = task->tk_status;
8508 rpc_put_task(task);
8509 out:
8510 dprintk("<-- %s status=%d\n", __func__, status);
8511 return status;
8512 }
8513
8514 static void
8515 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
8516 {
8517 struct nfs4_layoutget *lgp = calldata;
8518 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
8519
8520 dprintk("--> %s\n", __func__);
8521 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
8522 &lgp->res.seq_res, task);
8523 dprintk("<-- %s\n", __func__);
8524 }
8525
8526 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
8527 {
8528 struct nfs4_layoutget *lgp = calldata;
8529
8530 dprintk("--> %s\n", __func__);
8531 nfs41_sequence_process(task, &lgp->res.seq_res);
8532 dprintk("<-- %s\n", __func__);
8533 }
8534
8535 static int
8536 nfs4_layoutget_handle_exception(struct rpc_task *task,
8537 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
8538 {
8539 struct inode *inode = lgp->args.inode;
8540 struct nfs_server *server = NFS_SERVER(inode);
8541 struct pnfs_layout_hdr *lo;
8542 int nfs4err = task->tk_status;
8543 int err, status = 0;
8544 LIST_HEAD(head);
8545
8546 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
8547
8548 switch (nfs4err) {
8549 case 0:
8550 goto out;
8551
8552 /*
8553 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
8554 * on the file. set tk_status to -ENODATA to tell upper layer to
8555 * retry go inband.
8556 */
8557 case -NFS4ERR_LAYOUTUNAVAILABLE:
8558 status = -ENODATA;
8559 goto out;
8560 /*
8561 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
8562 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
8563 */
8564 case -NFS4ERR_BADLAYOUT:
8565 status = -EOVERFLOW;
8566 goto out;
8567 /*
8568 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
8569 * (or clients) writing to the same RAID stripe except when
8570 * the minlength argument is 0 (see RFC5661 section 18.43.3).
8571 *
8572 * Treat it like we would RECALLCONFLICT -- we retry for a little
8573 * while, and then eventually give up.
8574 */
8575 case -NFS4ERR_LAYOUTTRYLATER:
8576 if (lgp->args.minlength == 0) {
8577 status = -EOVERFLOW;
8578 goto out;
8579 }
8580 status = -EBUSY;
8581 break;
8582 case -NFS4ERR_RECALLCONFLICT:
8583 status = -ERECALLCONFLICT;
8584 break;
8585 case -NFS4ERR_DELEG_REVOKED:
8586 case -NFS4ERR_ADMIN_REVOKED:
8587 case -NFS4ERR_EXPIRED:
8588 case -NFS4ERR_BAD_STATEID:
8589 exception->timeout = 0;
8590 spin_lock(&inode->i_lock);
8591 lo = NFS_I(inode)->layout;
8592 /* If the open stateid was bad, then recover it. */
8593 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
8594 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
8595 spin_unlock(&inode->i_lock);
8596 exception->state = lgp->args.ctx->state;
8597 exception->stateid = &lgp->args.stateid;
8598 break;
8599 }
8600
8601 /*
8602 * Mark the bad layout state as invalid, then retry
8603 */
8604 pnfs_mark_layout_stateid_invalid(lo, &head);
8605 spin_unlock(&inode->i_lock);
8606 nfs_commit_inode(inode, 0);
8607 pnfs_free_lseg_list(&head);
8608 status = -EAGAIN;
8609 goto out;
8610 }
8611
8612 nfs4_sequence_free_slot(&lgp->res.seq_res);
8613 err = nfs4_handle_exception(server, nfs4err, exception);
8614 if (!status) {
8615 if (exception->retry)
8616 status = -EAGAIN;
8617 else
8618 status = err;
8619 }
8620 out:
8621 dprintk("<-- %s\n", __func__);
8622 return status;
8623 }
8624
8625 static size_t max_response_pages(struct nfs_server *server)
8626 {
8627 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
8628 return nfs_page_array_len(0, max_resp_sz);
8629 }
8630
8631 static void nfs4_free_pages(struct page **pages, size_t size)
8632 {
8633 int i;
8634
8635 if (!pages)
8636 return;
8637
8638 for (i = 0; i < size; i++) {
8639 if (!pages[i])
8640 break;
8641 __free_page(pages[i]);
8642 }
8643 kfree(pages);
8644 }
8645
8646 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
8647 {
8648 struct page **pages;
8649 int i;
8650
8651 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
8652 if (!pages) {
8653 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
8654 return NULL;
8655 }
8656
8657 for (i = 0; i < size; i++) {
8658 pages[i] = alloc_page(gfp_flags);
8659 if (!pages[i]) {
8660 dprintk("%s: failed to allocate page\n", __func__);
8661 nfs4_free_pages(pages, size);
8662 return NULL;
8663 }
8664 }
8665
8666 return pages;
8667 }
8668
8669 static void nfs4_layoutget_release(void *calldata)
8670 {
8671 struct nfs4_layoutget *lgp = calldata;
8672 struct inode *inode = lgp->args.inode;
8673 struct nfs_server *server = NFS_SERVER(inode);
8674 size_t max_pages = max_response_pages(server);
8675
8676 dprintk("--> %s\n", __func__);
8677 nfs4_sequence_free_slot(&lgp->res.seq_res);
8678 nfs4_free_pages(lgp->args.layout.pages, max_pages);
8679 pnfs_put_layout_hdr(NFS_I(inode)->layout);
8680 put_nfs_open_context(lgp->args.ctx);
8681 kfree(calldata);
8682 dprintk("<-- %s\n", __func__);
8683 }
8684
8685 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
8686 .rpc_call_prepare = nfs4_layoutget_prepare,
8687 .rpc_call_done = nfs4_layoutget_done,
8688 .rpc_release = nfs4_layoutget_release,
8689 };
8690
8691 struct pnfs_layout_segment *
8692 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
8693 {
8694 struct inode *inode = lgp->args.inode;
8695 struct nfs_server *server = NFS_SERVER(inode);
8696 size_t max_pages = max_response_pages(server);
8697 struct rpc_task *task;
8698 struct rpc_message msg = {
8699 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
8700 .rpc_argp = &lgp->args,
8701 .rpc_resp = &lgp->res,
8702 .rpc_cred = lgp->cred,
8703 };
8704 struct rpc_task_setup task_setup_data = {
8705 .rpc_client = server->client,
8706 .rpc_message = &msg,
8707 .callback_ops = &nfs4_layoutget_call_ops,
8708 .callback_data = lgp,
8709 .flags = RPC_TASK_ASYNC,
8710 };
8711 struct pnfs_layout_segment *lseg = NULL;
8712 struct nfs4_exception exception = {
8713 .inode = inode,
8714 .timeout = *timeout,
8715 };
8716 int status = 0;
8717
8718 dprintk("--> %s\n", __func__);
8719
8720 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
8721 pnfs_get_layout_hdr(NFS_I(inode)->layout);
8722
8723 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
8724 if (!lgp->args.layout.pages) {
8725 nfs4_layoutget_release(lgp);
8726 return ERR_PTR(-ENOMEM);
8727 }
8728 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
8729
8730 lgp->res.layoutp = &lgp->args.layout;
8731 lgp->res.seq_res.sr_slot = NULL;
8732 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
8733
8734 task = rpc_run_task(&task_setup_data);
8735 if (IS_ERR(task))
8736 return ERR_CAST(task);
8737 status = rpc_wait_for_completion_task(task);
8738 if (status == 0) {
8739 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
8740 *timeout = exception.timeout;
8741 }
8742
8743 trace_nfs4_layoutget(lgp->args.ctx,
8744 &lgp->args.range,
8745 &lgp->res.range,
8746 &lgp->res.stateid,
8747 status);
8748
8749 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8750 if (status == 0 && lgp->res.layoutp->len)
8751 lseg = pnfs_layout_process(lgp);
8752 rpc_put_task(task);
8753 dprintk("<-- %s status=%d\n", __func__, status);
8754 if (status)
8755 return ERR_PTR(status);
8756 return lseg;
8757 }
8758
8759 static void
8760 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8761 {
8762 struct nfs4_layoutreturn *lrp = calldata;
8763
8764 dprintk("--> %s\n", __func__);
8765 nfs4_setup_sequence(lrp->clp,
8766 &lrp->args.seq_args,
8767 &lrp->res.seq_res,
8768 task);
8769 }
8770
8771 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8772 {
8773 struct nfs4_layoutreturn *lrp = calldata;
8774 struct nfs_server *server;
8775
8776 dprintk("--> %s\n", __func__);
8777
8778 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
8779 return;
8780
8781 server = NFS_SERVER(lrp->args.inode);
8782 switch (task->tk_status) {
8783 case -NFS4ERR_OLD_STATEID:
8784 if (nfs4_refresh_layout_stateid(&lrp->args.stateid,
8785 lrp->args.inode))
8786 goto out_restart;
8787 /* Fallthrough */
8788 default:
8789 task->tk_status = 0;
8790 /* Fallthrough */
8791 case 0:
8792 break;
8793 case -NFS4ERR_DELAY:
8794 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8795 break;
8796 goto out_restart;
8797 }
8798 dprintk("<-- %s\n", __func__);
8799 return;
8800 out_restart:
8801 task->tk_status = 0;
8802 nfs4_sequence_free_slot(&lrp->res.seq_res);
8803 rpc_restart_call_prepare(task);
8804 }
8805
8806 static void nfs4_layoutreturn_release(void *calldata)
8807 {
8808 struct nfs4_layoutreturn *lrp = calldata;
8809 struct pnfs_layout_hdr *lo = lrp->args.layout;
8810
8811 dprintk("--> %s\n", __func__);
8812 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
8813 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
8814 nfs4_sequence_free_slot(&lrp->res.seq_res);
8815 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
8816 lrp->ld_private.ops->free(&lrp->ld_private);
8817 pnfs_put_layout_hdr(lrp->args.layout);
8818 nfs_iput_and_deactive(lrp->inode);
8819 kfree(calldata);
8820 dprintk("<-- %s\n", __func__);
8821 }
8822
8823 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8824 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8825 .rpc_call_done = nfs4_layoutreturn_done,
8826 .rpc_release = nfs4_layoutreturn_release,
8827 };
8828
8829 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8830 {
8831 struct rpc_task *task;
8832 struct rpc_message msg = {
8833 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8834 .rpc_argp = &lrp->args,
8835 .rpc_resp = &lrp->res,
8836 .rpc_cred = lrp->cred,
8837 };
8838 struct rpc_task_setup task_setup_data = {
8839 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8840 .rpc_message = &msg,
8841 .callback_ops = &nfs4_layoutreturn_call_ops,
8842 .callback_data = lrp,
8843 };
8844 int status = 0;
8845
8846 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
8847 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
8848 &task_setup_data.rpc_client, &msg);
8849
8850 dprintk("--> %s\n", __func__);
8851 if (!sync) {
8852 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8853 if (!lrp->inode) {
8854 nfs4_layoutreturn_release(lrp);
8855 return -EAGAIN;
8856 }
8857 task_setup_data.flags |= RPC_TASK_ASYNC;
8858 }
8859 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8860 task = rpc_run_task(&task_setup_data);
8861 if (IS_ERR(task))
8862 return PTR_ERR(task);
8863 if (sync)
8864 status = task->tk_status;
8865 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8866 dprintk("<-- %s status=%d\n", __func__, status);
8867 rpc_put_task(task);
8868 return status;
8869 }
8870
8871 static int
8872 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8873 struct pnfs_device *pdev,
8874 struct rpc_cred *cred)
8875 {
8876 struct nfs4_getdeviceinfo_args args = {
8877 .pdev = pdev,
8878 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8879 NOTIFY_DEVICEID4_DELETE,
8880 };
8881 struct nfs4_getdeviceinfo_res res = {
8882 .pdev = pdev,
8883 };
8884 struct rpc_message msg = {
8885 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8886 .rpc_argp = &args,
8887 .rpc_resp = &res,
8888 .rpc_cred = cred,
8889 };
8890 int status;
8891
8892 dprintk("--> %s\n", __func__);
8893 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8894 if (res.notification & ~args.notify_types)
8895 dprintk("%s: unsupported notification\n", __func__);
8896 if (res.notification != args.notify_types)
8897 pdev->nocache = 1;
8898
8899 dprintk("<-- %s status=%d\n", __func__, status);
8900
8901 return status;
8902 }
8903
8904 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8905 struct pnfs_device *pdev,
8906 struct rpc_cred *cred)
8907 {
8908 struct nfs4_exception exception = { };
8909 int err;
8910
8911 do {
8912 err = nfs4_handle_exception(server,
8913 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8914 &exception);
8915 } while (exception.retry);
8916 return err;
8917 }
8918 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8919
8920 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8921 {
8922 struct nfs4_layoutcommit_data *data = calldata;
8923 struct nfs_server *server = NFS_SERVER(data->args.inode);
8924
8925 nfs4_setup_sequence(server->nfs_client,
8926 &data->args.seq_args,
8927 &data->res.seq_res,
8928 task);
8929 }
8930
8931 static void
8932 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8933 {
8934 struct nfs4_layoutcommit_data *data = calldata;
8935 struct nfs_server *server = NFS_SERVER(data->args.inode);
8936
8937 if (!nfs41_sequence_done(task, &data->res.seq_res))
8938 return;
8939
8940 switch (task->tk_status) { /* Just ignore these failures */
8941 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8942 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8943 case -NFS4ERR_BADLAYOUT: /* no layout */
8944 case -NFS4ERR_GRACE: /* loca_recalim always false */
8945 task->tk_status = 0;
8946 case 0:
8947 break;
8948 default:
8949 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8950 rpc_restart_call_prepare(task);
8951 return;
8952 }
8953 }
8954 }
8955
8956 static void nfs4_layoutcommit_release(void *calldata)
8957 {
8958 struct nfs4_layoutcommit_data *data = calldata;
8959
8960 pnfs_cleanup_layoutcommit(data);
8961 nfs_post_op_update_inode_force_wcc(data->args.inode,
8962 data->res.fattr);
8963 put_rpccred(data->cred);
8964 nfs_iput_and_deactive(data->inode);
8965 kfree(data);
8966 }
8967
8968 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8969 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8970 .rpc_call_done = nfs4_layoutcommit_done,
8971 .rpc_release = nfs4_layoutcommit_release,
8972 };
8973
8974 int
8975 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8976 {
8977 struct rpc_message msg = {
8978 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8979 .rpc_argp = &data->args,
8980 .rpc_resp = &data->res,
8981 .rpc_cred = data->cred,
8982 };
8983 struct rpc_task_setup task_setup_data = {
8984 .task = &data->task,
8985 .rpc_client = NFS_CLIENT(data->args.inode),
8986 .rpc_message = &msg,
8987 .callback_ops = &nfs4_layoutcommit_ops,
8988 .callback_data = data,
8989 };
8990 struct rpc_task *task;
8991 int status = 0;
8992
8993 dprintk("NFS: initiating layoutcommit call. sync %d "
8994 "lbw: %llu inode %lu\n", sync,
8995 data->args.lastbytewritten,
8996 data->args.inode->i_ino);
8997
8998 if (!sync) {
8999 data->inode = nfs_igrab_and_active(data->args.inode);
9000 if (data->inode == NULL) {
9001 nfs4_layoutcommit_release(data);
9002 return -EAGAIN;
9003 }
9004 task_setup_data.flags = RPC_TASK_ASYNC;
9005 }
9006 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
9007 task = rpc_run_task(&task_setup_data);
9008 if (IS_ERR(task))
9009 return PTR_ERR(task);
9010 if (sync)
9011 status = task->tk_status;
9012 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9013 dprintk("%s: status %d\n", __func__, status);
9014 rpc_put_task(task);
9015 return status;
9016 }
9017
9018 /**
9019 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if
9020 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
9021 */
9022 static int
9023 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9024 struct nfs_fsinfo *info,
9025 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
9026 {
9027 struct nfs41_secinfo_no_name_args args = {
9028 .style = SECINFO_STYLE_CURRENT_FH,
9029 };
9030 struct nfs4_secinfo_res res = {
9031 .flavors = flavors,
9032 };
9033 struct rpc_message msg = {
9034 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
9035 .rpc_argp = &args,
9036 .rpc_resp = &res,
9037 };
9038 struct rpc_clnt *clnt = server->client;
9039 struct rpc_cred *cred = NULL;
9040 int status;
9041
9042 if (use_integrity) {
9043 clnt = server->nfs_client->cl_rpcclient;
9044 cred = nfs4_get_clid_cred(server->nfs_client);
9045 msg.rpc_cred = cred;
9046 }
9047
9048 dprintk("--> %s\n", __func__);
9049 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
9050 &res.seq_res, 0);
9051 dprintk("<-- %s status=%d\n", __func__, status);
9052
9053 if (cred)
9054 put_rpccred(cred);
9055
9056 return status;
9057 }
9058
9059 static int
9060 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9061 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
9062 {
9063 struct nfs4_exception exception = { };
9064 int err;
9065 do {
9066 /* first try using integrity protection */
9067 err = -NFS4ERR_WRONGSEC;
9068
9069 /* try to use integrity protection with machine cred */
9070 if (_nfs4_is_integrity_protected(server->nfs_client))
9071 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9072 flavors, true);
9073
9074 /*
9075 * if unable to use integrity protection, or SECINFO with
9076 * integrity protection returns NFS4ERR_WRONGSEC (which is
9077 * disallowed by spec, but exists in deployed servers) use
9078 * the current filesystem's rpc_client and the user cred.
9079 */
9080 if (err == -NFS4ERR_WRONGSEC)
9081 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9082 flavors, false);
9083
9084 switch (err) {
9085 case 0:
9086 case -NFS4ERR_WRONGSEC:
9087 case -ENOTSUPP:
9088 goto out;
9089 default:
9090 err = nfs4_handle_exception(server, err, &exception);
9091 }
9092 } while (exception.retry);
9093 out:
9094 return err;
9095 }
9096
9097 static int
9098 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
9099 struct nfs_fsinfo *info)
9100 {
9101 int err;
9102 struct page *page;
9103 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
9104 struct nfs4_secinfo_flavors *flavors;
9105 struct nfs4_secinfo4 *secinfo;
9106 int i;
9107
9108 page = alloc_page(GFP_KERNEL);
9109 if (!page) {
9110 err = -ENOMEM;
9111 goto out;
9112 }
9113
9114 flavors = page_address(page);
9115 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
9116
9117 /*
9118 * Fall back on "guess and check" method if
9119 * the server doesn't support SECINFO_NO_NAME
9120 */
9121 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
9122 err = nfs4_find_root_sec(server, fhandle, info);
9123 goto out_freepage;
9124 }
9125 if (err)
9126 goto out_freepage;
9127
9128 for (i = 0; i < flavors->num_flavors; i++) {
9129 secinfo = &flavors->flavors[i];
9130
9131 switch (secinfo->flavor) {
9132 case RPC_AUTH_NULL:
9133 case RPC_AUTH_UNIX:
9134 case RPC_AUTH_GSS:
9135 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
9136 &secinfo->flavor_info);
9137 break;
9138 default:
9139 flavor = RPC_AUTH_MAXFLAVOR;
9140 break;
9141 }
9142
9143 if (!nfs_auth_info_match(&server->auth_info, flavor))
9144 flavor = RPC_AUTH_MAXFLAVOR;
9145
9146 if (flavor != RPC_AUTH_MAXFLAVOR) {
9147 err = nfs4_lookup_root_sec(server, fhandle,
9148 info, flavor);
9149 if (!err)
9150 break;
9151 }
9152 }
9153
9154 if (flavor == RPC_AUTH_MAXFLAVOR)
9155 err = -EPERM;
9156
9157 out_freepage:
9158 put_page(page);
9159 if (err == -EACCES)
9160 return -EPERM;
9161 out:
9162 return err;
9163 }
9164
9165 static int _nfs41_test_stateid(struct nfs_server *server,
9166 nfs4_stateid *stateid,
9167 struct rpc_cred *cred)
9168 {
9169 int status;
9170 struct nfs41_test_stateid_args args = {
9171 .stateid = stateid,
9172 };
9173 struct nfs41_test_stateid_res res;
9174 struct rpc_message msg = {
9175 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
9176 .rpc_argp = &args,
9177 .rpc_resp = &res,
9178 .rpc_cred = cred,
9179 };
9180 struct rpc_clnt *rpc_client = server->client;
9181
9182 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9183 &rpc_client, &msg);
9184
9185 dprintk("NFS call test_stateid %p\n", stateid);
9186 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
9187 nfs4_set_sequence_privileged(&args.seq_args);
9188 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
9189 &args.seq_args, &res.seq_res);
9190 if (status != NFS_OK) {
9191 dprintk("NFS reply test_stateid: failed, %d\n", status);
9192 return status;
9193 }
9194 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
9195 return -res.status;
9196 }
9197
9198 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
9199 int err, struct nfs4_exception *exception)
9200 {
9201 exception->retry = 0;
9202 switch(err) {
9203 case -NFS4ERR_DELAY:
9204 case -NFS4ERR_RETRY_UNCACHED_REP:
9205 nfs4_handle_exception(server, err, exception);
9206 break;
9207 case -NFS4ERR_BADSESSION:
9208 case -NFS4ERR_BADSLOT:
9209 case -NFS4ERR_BAD_HIGH_SLOT:
9210 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9211 case -NFS4ERR_DEADSESSION:
9212 nfs4_do_handle_exception(server, err, exception);
9213 }
9214 }
9215
9216 /**
9217 * nfs41_test_stateid - perform a TEST_STATEID operation
9218 *
9219 * @server: server / transport on which to perform the operation
9220 * @stateid: state ID to test
9221 * @cred: credential
9222 *
9223 * Returns NFS_OK if the server recognizes that "stateid" is valid.
9224 * Otherwise a negative NFS4ERR value is returned if the operation
9225 * failed or the state ID is not currently valid.
9226 */
9227 static int nfs41_test_stateid(struct nfs_server *server,
9228 nfs4_stateid *stateid,
9229 struct rpc_cred *cred)
9230 {
9231 struct nfs4_exception exception = { };
9232 int err;
9233 do {
9234 err = _nfs41_test_stateid(server, stateid, cred);
9235 nfs4_handle_delay_or_session_error(server, err, &exception);
9236 } while (exception.retry);
9237 return err;
9238 }
9239
9240 struct nfs_free_stateid_data {
9241 struct nfs_server *server;
9242 struct nfs41_free_stateid_args args;
9243 struct nfs41_free_stateid_res res;
9244 };
9245
9246 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
9247 {
9248 struct nfs_free_stateid_data *data = calldata;
9249 nfs4_setup_sequence(data->server->nfs_client,
9250 &data->args.seq_args,
9251 &data->res.seq_res,
9252 task);
9253 }
9254
9255 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
9256 {
9257 struct nfs_free_stateid_data *data = calldata;
9258
9259 nfs41_sequence_done(task, &data->res.seq_res);
9260
9261 switch (task->tk_status) {
9262 case -NFS4ERR_DELAY:
9263 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
9264 rpc_restart_call_prepare(task);
9265 }
9266 }
9267
9268 static void nfs41_free_stateid_release(void *calldata)
9269 {
9270 kfree(calldata);
9271 }
9272
9273 static const struct rpc_call_ops nfs41_free_stateid_ops = {
9274 .rpc_call_prepare = nfs41_free_stateid_prepare,
9275 .rpc_call_done = nfs41_free_stateid_done,
9276 .rpc_release = nfs41_free_stateid_release,
9277 };
9278
9279 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
9280 const nfs4_stateid *stateid,
9281 struct rpc_cred *cred,
9282 bool privileged)
9283 {
9284 struct rpc_message msg = {
9285 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
9286 .rpc_cred = cred,
9287 };
9288 struct rpc_task_setup task_setup = {
9289 .rpc_client = server->client,
9290 .rpc_message = &msg,
9291 .callback_ops = &nfs41_free_stateid_ops,
9292 .flags = RPC_TASK_ASYNC,
9293 };
9294 struct nfs_free_stateid_data *data;
9295
9296 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9297 &task_setup.rpc_client, &msg);
9298
9299 dprintk("NFS call free_stateid %p\n", stateid);
9300 data = kmalloc(sizeof(*data), GFP_NOFS);
9301 if (!data)
9302 return ERR_PTR(-ENOMEM);
9303 data->server = server;
9304 nfs4_stateid_copy(&data->args.stateid, stateid);
9305
9306 task_setup.callback_data = data;
9307
9308 msg.rpc_argp = &data->args;
9309 msg.rpc_resp = &data->res;
9310 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
9311 if (privileged)
9312 nfs4_set_sequence_privileged(&data->args.seq_args);
9313
9314 return rpc_run_task(&task_setup);
9315 }
9316
9317 /**
9318 * nfs41_free_stateid - perform a FREE_STATEID operation
9319 *
9320 * @server: server / transport on which to perform the operation
9321 * @stateid: state ID to release
9322 * @cred: credential
9323 * @is_recovery: set to true if this call needs to be privileged
9324 *
9325 * Note: this function is always asynchronous.
9326 */
9327 static int nfs41_free_stateid(struct nfs_server *server,
9328 const nfs4_stateid *stateid,
9329 struct rpc_cred *cred,
9330 bool is_recovery)
9331 {
9332 struct rpc_task *task;
9333
9334 task = _nfs41_free_stateid(server, stateid, cred, is_recovery);
9335 if (IS_ERR(task))
9336 return PTR_ERR(task);
9337 rpc_put_task(task);
9338 return 0;
9339 }
9340
9341 static void
9342 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
9343 {
9344 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
9345
9346 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
9347 nfs4_free_lock_state(server, lsp);
9348 }
9349
9350 static bool nfs41_match_stateid(const nfs4_stateid *s1,
9351 const nfs4_stateid *s2)
9352 {
9353 if (s1->type != s2->type)
9354 return false;
9355
9356 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
9357 return false;
9358
9359 if (s1->seqid == s2->seqid)
9360 return true;
9361
9362 return s1->seqid == 0 || s2->seqid == 0;
9363 }
9364
9365 #endif /* CONFIG_NFS_V4_1 */
9366
9367 static bool nfs4_match_stateid(const nfs4_stateid *s1,
9368 const nfs4_stateid *s2)
9369 {
9370 return nfs4_stateid_match(s1, s2);
9371 }
9372
9373
9374 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
9375 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
9376 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
9377 .recover_open = nfs4_open_reclaim,
9378 .recover_lock = nfs4_lock_reclaim,
9379 .establish_clid = nfs4_init_clientid,
9380 .detect_trunking = nfs40_discover_server_trunking,
9381 };
9382
9383 #if defined(CONFIG_NFS_V4_1)
9384 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
9385 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
9386 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
9387 .recover_open = nfs4_open_reclaim,
9388 .recover_lock = nfs4_lock_reclaim,
9389 .establish_clid = nfs41_init_clientid,
9390 .reclaim_complete = nfs41_proc_reclaim_complete,
9391 .detect_trunking = nfs41_discover_server_trunking,
9392 };
9393 #endif /* CONFIG_NFS_V4_1 */
9394
9395 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
9396 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
9397 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
9398 .recover_open = nfs40_open_expired,
9399 .recover_lock = nfs4_lock_expired,
9400 .establish_clid = nfs4_init_clientid,
9401 };
9402
9403 #if defined(CONFIG_NFS_V4_1)
9404 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
9405 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
9406 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
9407 .recover_open = nfs41_open_expired,
9408 .recover_lock = nfs41_lock_expired,
9409 .establish_clid = nfs41_init_clientid,
9410 };
9411 #endif /* CONFIG_NFS_V4_1 */
9412
9413 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
9414 .sched_state_renewal = nfs4_proc_async_renew,
9415 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
9416 .renew_lease = nfs4_proc_renew,
9417 };
9418
9419 #if defined(CONFIG_NFS_V4_1)
9420 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
9421 .sched_state_renewal = nfs41_proc_async_sequence,
9422 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
9423 .renew_lease = nfs4_proc_sequence,
9424 };
9425 #endif
9426
9427 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
9428 .get_locations = _nfs40_proc_get_locations,
9429 .fsid_present = _nfs40_proc_fsid_present,
9430 };
9431
9432 #if defined(CONFIG_NFS_V4_1)
9433 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
9434 .get_locations = _nfs41_proc_get_locations,
9435 .fsid_present = _nfs41_proc_fsid_present,
9436 };
9437 #endif /* CONFIG_NFS_V4_1 */
9438
9439 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
9440 .minor_version = 0,
9441 .init_caps = NFS_CAP_READDIRPLUS
9442 | NFS_CAP_ATOMIC_OPEN
9443 | NFS_CAP_POSIX_LOCK,
9444 .init_client = nfs40_init_client,
9445 .shutdown_client = nfs40_shutdown_client,
9446 .match_stateid = nfs4_match_stateid,
9447 .find_root_sec = nfs4_find_root_sec,
9448 .free_lock_state = nfs4_release_lockowner,
9449 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
9450 .alloc_seqid = nfs_alloc_seqid,
9451 .call_sync_ops = &nfs40_call_sync_ops,
9452 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
9453 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
9454 .state_renewal_ops = &nfs40_state_renewal_ops,
9455 .mig_recovery_ops = &nfs40_mig_recovery_ops,
9456 };
9457
9458 #if defined(CONFIG_NFS_V4_1)
9459 static struct nfs_seqid *
9460 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
9461 {
9462 return NULL;
9463 }
9464
9465 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
9466 .minor_version = 1,
9467 .init_caps = NFS_CAP_READDIRPLUS
9468 | NFS_CAP_ATOMIC_OPEN
9469 | NFS_CAP_POSIX_LOCK
9470 | NFS_CAP_STATEID_NFSV41
9471 | NFS_CAP_ATOMIC_OPEN_V1,
9472 .init_client = nfs41_init_client,
9473 .shutdown_client = nfs41_shutdown_client,
9474 .match_stateid = nfs41_match_stateid,
9475 .find_root_sec = nfs41_find_root_sec,
9476 .free_lock_state = nfs41_free_lock_state,
9477 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
9478 .alloc_seqid = nfs_alloc_no_seqid,
9479 .session_trunk = nfs4_test_session_trunk,
9480 .call_sync_ops = &nfs41_call_sync_ops,
9481 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
9482 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
9483 .state_renewal_ops = &nfs41_state_renewal_ops,
9484 .mig_recovery_ops = &nfs41_mig_recovery_ops,
9485 };
9486 #endif
9487
9488 #if defined(CONFIG_NFS_V4_2)
9489 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
9490 .minor_version = 2,
9491 .init_caps = NFS_CAP_READDIRPLUS
9492 | NFS_CAP_ATOMIC_OPEN
9493 | NFS_CAP_POSIX_LOCK
9494 | NFS_CAP_STATEID_NFSV41
9495 | NFS_CAP_ATOMIC_OPEN_V1
9496 | NFS_CAP_ALLOCATE
9497 | NFS_CAP_COPY
9498 | NFS_CAP_DEALLOCATE
9499 | NFS_CAP_SEEK
9500 | NFS_CAP_LAYOUTSTATS
9501 | NFS_CAP_CLONE,
9502 .init_client = nfs41_init_client,
9503 .shutdown_client = nfs41_shutdown_client,
9504 .match_stateid = nfs41_match_stateid,
9505 .find_root_sec = nfs41_find_root_sec,
9506 .free_lock_state = nfs41_free_lock_state,
9507 .call_sync_ops = &nfs41_call_sync_ops,
9508 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
9509 .alloc_seqid = nfs_alloc_no_seqid,
9510 .session_trunk = nfs4_test_session_trunk,
9511 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
9512 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
9513 .state_renewal_ops = &nfs41_state_renewal_ops,
9514 .mig_recovery_ops = &nfs41_mig_recovery_ops,
9515 };
9516 #endif
9517
9518 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
9519 [0] = &nfs_v4_0_minor_ops,
9520 #if defined(CONFIG_NFS_V4_1)
9521 [1] = &nfs_v4_1_minor_ops,
9522 #endif
9523 #if defined(CONFIG_NFS_V4_2)
9524 [2] = &nfs_v4_2_minor_ops,
9525 #endif
9526 };
9527
9528 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
9529 {
9530 ssize_t error, error2;
9531
9532 error = generic_listxattr(dentry, list, size);
9533 if (error < 0)
9534 return error;
9535 if (list) {
9536 list += error;
9537 size -= error;
9538 }
9539
9540 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
9541 if (error2 < 0)
9542 return error2;
9543 return error + error2;
9544 }
9545
9546 static const struct inode_operations nfs4_dir_inode_operations = {
9547 .create = nfs_create,
9548 .lookup = nfs_lookup,
9549 .atomic_open = nfs_atomic_open,
9550 .link = nfs_link,
9551 .unlink = nfs_unlink,
9552 .symlink = nfs_symlink,
9553 .mkdir = nfs_mkdir,
9554 .rmdir = nfs_rmdir,
9555 .mknod = nfs_mknod,
9556 .rename = nfs_rename,
9557 .permission = nfs_permission,
9558 .getattr = nfs_getattr,
9559 .setattr = nfs_setattr,
9560 .listxattr = nfs4_listxattr,
9561 };
9562
9563 static const struct inode_operations nfs4_file_inode_operations = {
9564 .permission = nfs_permission,
9565 .getattr = nfs_getattr,
9566 .setattr = nfs_setattr,
9567 .listxattr = nfs4_listxattr,
9568 };
9569
9570 const struct nfs_rpc_ops nfs_v4_clientops = {
9571 .version = 4, /* protocol version */
9572 .dentry_ops = &nfs4_dentry_operations,
9573 .dir_inode_ops = &nfs4_dir_inode_operations,
9574 .file_inode_ops = &nfs4_file_inode_operations,
9575 .file_ops = &nfs4_file_operations,
9576 .getroot = nfs4_proc_get_root,
9577 .submount = nfs4_submount,
9578 .try_mount = nfs4_try_mount,
9579 .getattr = nfs4_proc_getattr,
9580 .setattr = nfs4_proc_setattr,
9581 .lookup = nfs4_proc_lookup,
9582 .lookupp = nfs4_proc_lookupp,
9583 .access = nfs4_proc_access,
9584 .readlink = nfs4_proc_readlink,
9585 .create = nfs4_proc_create,
9586 .remove = nfs4_proc_remove,
9587 .unlink_setup = nfs4_proc_unlink_setup,
9588 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
9589 .unlink_done = nfs4_proc_unlink_done,
9590 .rename_setup = nfs4_proc_rename_setup,
9591 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
9592 .rename_done = nfs4_proc_rename_done,
9593 .link = nfs4_proc_link,
9594 .symlink = nfs4_proc_symlink,
9595 .mkdir = nfs4_proc_mkdir,
9596 .rmdir = nfs4_proc_remove,
9597 .readdir = nfs4_proc_readdir,
9598 .mknod = nfs4_proc_mknod,
9599 .statfs = nfs4_proc_statfs,
9600 .fsinfo = nfs4_proc_fsinfo,
9601 .pathconf = nfs4_proc_pathconf,
9602 .set_capabilities = nfs4_server_capabilities,
9603 .decode_dirent = nfs4_decode_dirent,
9604 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
9605 .read_setup = nfs4_proc_read_setup,
9606 .read_done = nfs4_read_done,
9607 .write_setup = nfs4_proc_write_setup,
9608 .write_done = nfs4_write_done,
9609 .commit_setup = nfs4_proc_commit_setup,
9610 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
9611 .commit_done = nfs4_commit_done,
9612 .lock = nfs4_proc_lock,
9613 .clear_acl_cache = nfs4_zap_acl_attr,
9614 .close_context = nfs4_close_context,
9615 .open_context = nfs4_atomic_open,
9616 .have_delegation = nfs4_have_delegation,
9617 .return_delegation = nfs4_inode_return_delegation,
9618 .alloc_client = nfs4_alloc_client,
9619 .init_client = nfs4_init_client,
9620 .free_client = nfs4_free_client,
9621 .create_server = nfs4_create_server,
9622 .clone_server = nfs_clone_server,
9623 };
9624
9625 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
9626 .name = XATTR_NAME_NFSV4_ACL,
9627 .list = nfs4_xattr_list_nfs4_acl,
9628 .get = nfs4_xattr_get_nfs4_acl,
9629 .set = nfs4_xattr_set_nfs4_acl,
9630 };
9631
9632 const struct xattr_handler *nfs4_xattr_handlers[] = {
9633 &nfs4_xattr_nfs4_acl_handler,
9634 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
9635 &nfs4_xattr_nfs4_label_handler,
9636 #endif
9637 NULL
9638 };
9639
9640 /*
9641 * Local variables:
9642 * c-basic-offset: 8
9643 * End:
9644 */