1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71
72 #include "nfs4trace.h"
73
74 #ifdef CONFIG_NFS_V4_2
75 #include "nfs42.h"
76 #endif /* CONFIG_NFS_V4_2 */
77
78 #define NFSDBG_FACILITY NFSDBG_PROC
79
80 #define NFS4_BITMASK_SZ 3
81
82 #define NFS4_POLL_RETRY_MIN (HZ/10)
83 #define NFS4_POLL_RETRY_MAX (15*HZ)
84
85 /* file attributes which can be mapped to nfs attributes */
86 #define NFS4_VALID_ATTRS (ATTR_MODE \
87 | ATTR_UID \
88 | ATTR_GID \
89 | ATTR_SIZE \
90 | ATTR_ATIME \
91 | ATTR_MTIME \
92 | ATTR_CTIME \
93 | ATTR_ATIME_SET \
94 | ATTR_MTIME_SET)
95
96 struct nfs4_opendata;
97 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
98 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
99 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
100 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode);
101 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
102 struct nfs_fattr *fattr, struct iattr *sattr,
103 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
104 struct nfs4_label *olabel);
105 #ifdef CONFIG_NFS_V4_1
106 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
107 const struct cred *cred,
108 struct nfs4_slot *slot,
109 bool is_privileged);
110 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
111 const struct cred *);
112 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
113 const struct cred *, bool);
114 #endif
115 static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
116 struct nfs_server *server,
117 struct nfs4_label *label);
118
119 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
120 static inline struct nfs4_label *
121 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
122 struct iattr *sattr, struct nfs4_label *label)
123 {
124 int err;
125
126 if (label == NULL)
127 return NULL;
128
129 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
130 return NULL;
131
132 err = security_dentry_init_security(dentry, sattr->ia_mode,
133 &dentry->d_name, (void **)&label->label, &label->len);
134 if (err == 0)
135 return label;
136
137 return NULL;
138 }
139 static inline void
140 nfs4_label_release_security(struct nfs4_label *label)
141 {
142 struct lsmcontext scaff; /* scaffolding */
143
144 if (label) {
145 lsmcontext_init(&scaff, label->label, label->len, 0);
146 security_release_secctx(&scaff);
147 }
148 }
149 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
150 {
151 if (label)
152 return server->attr_bitmask;
153
154 return server->attr_bitmask_nl;
155 }
156 #else
157 static inline struct nfs4_label *
158 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
159 struct iattr *sattr, struct nfs4_label *l)
160 { return NULL; }
161 static inline void
162 nfs4_label_release_security(struct nfs4_label *label)
163 { return; }
164 static inline u32 *
165 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
166 { return server->attr_bitmask; }
167 #endif
168
169 /* Prevent leaks of NFSv4 errors into userland */
170 static int nfs4_map_errors(int err)
171 {
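/*
 * Note: plain errno values (>= -1000) are passed straight through below;
 * only the NFS4ERR_* range beyond that is translated to a local errno.
 */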
172 if (err >= -1000)
173 return err;
174 switch (err) {
175 case -NFS4ERR_RESOURCE:
176 case -NFS4ERR_LAYOUTTRYLATER:
177 case -NFS4ERR_RECALLCONFLICT:
178 return -EREMOTEIO;
179 case -NFS4ERR_WRONGSEC:
180 case -NFS4ERR_WRONG_CRED:
181 return -EPERM;
182 case -NFS4ERR_BADOWNER:
183 case -NFS4ERR_BADNAME:
184 return -EINVAL;
185 case -NFS4ERR_SHARE_DENIED:
186 return -EACCES;
187 case -NFS4ERR_MINOR_VERS_MISMATCH:
188 return -EPROTONOSUPPORT;
189 case -NFS4ERR_FILE_OPEN:
190 return -EBUSY;
191 case -NFS4ERR_NOT_SAME:
192 return -ENOTSYNC;
193 default:
194 dprintk("%s could not handle NFSv4 error %d\n",
195 __func__, -err);
196 break;
197 }
198 return -EIO;
199 }
200
201 /*
202 * This is our standard bitmap for GETATTR requests.
203 */
204 const u32 nfs4_fattr_bitmap[3] = {
205 FATTR4_WORD0_TYPE
206 | FATTR4_WORD0_CHANGE
207 | FATTR4_WORD0_SIZE
208 | FATTR4_WORD0_FSID
209 | FATTR4_WORD0_FILEID,
210 FATTR4_WORD1_MODE
211 | FATTR4_WORD1_NUMLINKS
212 | FATTR4_WORD1_OWNER
213 | FATTR4_WORD1_OWNER_GROUP
214 | FATTR4_WORD1_RAWDEV
215 | FATTR4_WORD1_SPACE_USED
216 | FATTR4_WORD1_TIME_ACCESS
217 | FATTR4_WORD1_TIME_METADATA
218 | FATTR4_WORD1_TIME_MODIFY
219 | FATTR4_WORD1_MOUNTED_ON_FILEID,
220 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
221 FATTR4_WORD2_SECURITY_LABEL
222 #endif
223 };
224
225 static const u32 nfs4_pnfs_open_bitmap[3] = {
226 FATTR4_WORD0_TYPE
227 | FATTR4_WORD0_CHANGE
228 | FATTR4_WORD0_SIZE
229 | FATTR4_WORD0_FSID
230 | FATTR4_WORD0_FILEID,
231 FATTR4_WORD1_MODE
232 | FATTR4_WORD1_NUMLINKS
233 | FATTR4_WORD1_OWNER
234 | FATTR4_WORD1_OWNER_GROUP
235 | FATTR4_WORD1_RAWDEV
236 | FATTR4_WORD1_SPACE_USED
237 | FATTR4_WORD1_TIME_ACCESS
238 | FATTR4_WORD1_TIME_METADATA
239 | FATTR4_WORD1_TIME_MODIFY,
240 FATTR4_WORD2_MDSTHRESHOLD
241 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
242 | FATTR4_WORD2_SECURITY_LABEL
243 #endif
244 };
245
246 static const u32 nfs4_open_noattr_bitmap[3] = {
247 FATTR4_WORD0_TYPE
248 | FATTR4_WORD0_FILEID,
249 };
250
251 const u32 nfs4_statfs_bitmap[3] = {
252 FATTR4_WORD0_FILES_AVAIL
253 | FATTR4_WORD0_FILES_FREE
254 | FATTR4_WORD0_FILES_TOTAL,
255 FATTR4_WORD1_SPACE_AVAIL
256 | FATTR4_WORD1_SPACE_FREE
257 | FATTR4_WORD1_SPACE_TOTAL
258 };
259
260 const u32 nfs4_pathconf_bitmap[3] = {
261 FATTR4_WORD0_MAXLINK
262 | FATTR4_WORD0_MAXNAME,
263 0
264 };
265
266 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
267 | FATTR4_WORD0_MAXREAD
268 | FATTR4_WORD0_MAXWRITE
269 | FATTR4_WORD0_LEASE_TIME,
270 FATTR4_WORD1_TIME_DELTA
271 | FATTR4_WORD1_FS_LAYOUT_TYPES,
272 FATTR4_WORD2_LAYOUT_BLKSIZE
273 | FATTR4_WORD2_CLONE_BLKSIZE
274 | FATTR4_WORD2_XATTR_SUPPORT
275 };
276
277 const u32 nfs4_fs_locations_bitmap[3] = {
278 FATTR4_WORD0_CHANGE
279 | FATTR4_WORD0_SIZE
280 | FATTR4_WORD0_FSID
281 | FATTR4_WORD0_FILEID
282 | FATTR4_WORD0_FS_LOCATIONS,
283 FATTR4_WORD1_OWNER
284 | FATTR4_WORD1_OWNER_GROUP
285 | FATTR4_WORD1_RAWDEV
286 | FATTR4_WORD1_SPACE_USED
287 | FATTR4_WORD1_TIME_ACCESS
288 | FATTR4_WORD1_TIME_METADATA
289 | FATTR4_WORD1_TIME_MODIFY
290 | FATTR4_WORD1_MOUNTED_ON_FILEID,
291 };
292
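/*
 * Copy the GETATTR bitmask, then drop the SIZE and CHANGE attribute
 * requests when a read delegation means the locally cached values are
 * still authoritative (unless revalidation has been forced).
 */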
293 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
294 struct inode *inode)
295 {
296 unsigned long cache_validity;
297
298 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
299 if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
300 return;
301
302 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
303 if (!(cache_validity & NFS_INO_REVAL_FORCED))
304 cache_validity &= ~(NFS_INO_INVALID_CHANGE
305 | NFS_INO_INVALID_SIZE);
306
307 if (!(cache_validity & NFS_INO_INVALID_SIZE))
308 dst[0] &= ~FATTR4_WORD0_SIZE;
309
310 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
311 dst[0] &= ~FATTR4_WORD0_CHANGE;
312 }
313
314 static void nfs4_bitmap_copy_adjust_setattr(__u32 *dst,
315 const __u32 *src, struct inode *inode)
316 {
317 nfs4_bitmap_copy_adjust(dst, src, inode);
318 }
319
320 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
321 struct nfs4_readdir_arg *readdir)
322 {
323 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
324 __be32 *start, *p;
325
326 if (cookie > 2) {
327 readdir->cookie = cookie;
328 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
329 return;
330 }
331
332 readdir->cookie = 0;
333 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
334 if (cookie == 2)
335 return;
336
337 /*
338 * NFSv4 servers do not return entries for '.' and '..'
339 * Therefore, we fake these entries here. We let '.'
340 * have cookie 0 and '..' have cookie 1. Note that
341 * when talking to the server, we always send cookie 0
342 * instead of 1 or 2.
343 */
344 start = p = kmap_atomic(*readdir->pages);
345
346 if (cookie == 0) {
347 *p++ = xdr_one; /* next */
348 *p++ = xdr_zero; /* cookie, first word */
349 *p++ = xdr_one; /* cookie, second word */
350 *p++ = xdr_one; /* entry len */
351 memcpy(p, ".\0\0\0", 4); /* entry */
352 p++;
353 *p++ = xdr_one; /* bitmap length */
354 *p++ = htonl(attrs); /* bitmap */
355 *p++ = htonl(12); /* attribute buffer length */
356 *p++ = htonl(NF4DIR);
357 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
358 }
359
360 *p++ = xdr_one; /* next */
361 *p++ = xdr_zero; /* cookie, first word */
362 *p++ = xdr_two; /* cookie, second word */
363 *p++ = xdr_two; /* entry len */
364 memcpy(p, "..\0\0", 4); /* entry */
365 p++;
366 *p++ = xdr_one; /* bitmap length */
367 *p++ = htonl(attrs); /* bitmap */
368 *p++ = htonl(12); /* attribute buffer length */
369 *p++ = htonl(NF4DIR);
370 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
371
372 readdir->pgbase = (char *)p - (char *)start;
373 readdir->count -= readdir->pgbase;
374 kunmap_atomic(start);
375 }
376
377 static void nfs4_test_and_free_stateid(struct nfs_server *server,
378 nfs4_stateid *stateid,
379 const struct cred *cred)
380 {
381 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
382
383 ops->test_and_free_expired(server, stateid, cred);
384 }
385
386 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
387 nfs4_stateid *stateid,
388 const struct cred *cred)
389 {
390 stateid->type = NFS4_REVOKED_STATEID_TYPE;
391 nfs4_test_and_free_stateid(server, stateid, cred);
392 }
393
394 static void nfs4_free_revoked_stateid(struct nfs_server *server,
395 const nfs4_stateid *stateid,
396 const struct cred *cred)
397 {
398 nfs4_stateid tmp;
399
400 nfs4_stateid_copy(&tmp, stateid);
401 __nfs4_free_revoked_stateid(server, &tmp, cred);
402 }
403
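/*
 * Exponential backoff helper: clamp *timeout to the
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] range, return the current
 * delay, and double the stored timeout for the next retry.
 */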
404 static long nfs4_update_delay(long *timeout)
405 {
406 long ret;
407 if (!timeout)
408 return NFS4_POLL_RETRY_MAX;
409 if (*timeout <= 0)
410 *timeout = NFS4_POLL_RETRY_MIN;
411 if (*timeout > NFS4_POLL_RETRY_MAX)
412 *timeout = NFS4_POLL_RETRY_MAX;
413 ret = *timeout;
414 *timeout <<= 1;
415 return ret;
416 }
417
418 static int nfs4_delay_killable(long *timeout)
419 {
420 might_sleep();
421
422 freezable_schedule_timeout_killable_unsafe(
423 nfs4_update_delay(timeout));
424 if (!__fatal_signal_pending(current))
425 return 0;
426 return -EINTR;
427 }
428
429 static int nfs4_delay_interruptible(long *timeout)
430 {
431 might_sleep();
432
433 freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
434 if (!signal_pending(current))
435 return 0;
436 return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
437 }
438
439 static int nfs4_delay(long *timeout, bool interruptible)
440 {
441 if (interruptible)
442 return nfs4_delay_interruptible(timeout);
443 return nfs4_delay_killable(timeout);
444 }
445
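/*
 * Return the stateid only if it is of a type the state manager can
 * recover (open, lock or delegation stateids); otherwise NULL.
 */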
446 static const nfs4_stateid *
447 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
448 {
449 if (!stateid)
450 return NULL;
451 switch (stateid->type) {
452 case NFS4_OPEN_STATEID_TYPE:
453 case NFS4_LOCK_STATEID_TYPE:
454 case NFS4_DELEGATION_STATEID_TYPE:
455 return stateid;
456 default:
457 break;
458 }
459 return NULL;
460 }
461
462 /* This is the error handling routine for processes that are allowed
463 * to sleep.
464 */
465 static int nfs4_do_handle_exception(struct nfs_server *server,
466 int errorcode, struct nfs4_exception *exception)
467 {
468 struct nfs_client *clp = server->nfs_client;
469 struct nfs4_state *state = exception->state;
470 const nfs4_stateid *stateid;
471 struct inode *inode = exception->inode;
472 int ret = errorcode;
473
474 exception->delay = 0;
475 exception->recovering = 0;
476 exception->retry = 0;
477
478 stateid = nfs4_recoverable_stateid(exception->stateid);
479 if (stateid == NULL && state != NULL)
480 stateid = nfs4_recoverable_stateid(&state->stateid);
481
482 switch(errorcode) {
483 case 0:
484 return 0;
485 case -NFS4ERR_BADHANDLE:
486 case -ESTALE:
487 if (inode != NULL && S_ISREG(inode->i_mode))
488 pnfs_destroy_layout(NFS_I(inode));
489 break;
490 case -NFS4ERR_DELEG_REVOKED:
491 case -NFS4ERR_ADMIN_REVOKED:
492 case -NFS4ERR_EXPIRED:
493 case -NFS4ERR_BAD_STATEID:
494 case -NFS4ERR_PARTNER_NO_AUTH:
495 if (inode != NULL && stateid != NULL) {
496 nfs_inode_find_state_and_recover(inode,
497 stateid);
498 goto wait_on_recovery;
499 }
500 fallthrough;
501 case -NFS4ERR_OPENMODE:
502 if (inode) {
503 int err;
504
505 err = nfs_async_inode_return_delegation(inode,
506 stateid);
507 if (err == 0)
508 goto wait_on_recovery;
509 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
510 exception->retry = 1;
511 break;
512 }
513 }
514 if (state == NULL)
515 break;
516 ret = nfs4_schedule_stateid_recovery(server, state);
517 if (ret < 0)
518 break;
519 goto wait_on_recovery;
520 case -NFS4ERR_STALE_STATEID:
521 case -NFS4ERR_STALE_CLIENTID:
522 nfs4_schedule_lease_recovery(clp);
523 goto wait_on_recovery;
524 case -NFS4ERR_MOVED:
525 ret = nfs4_schedule_migration_recovery(server);
526 if (ret < 0)
527 break;
528 goto wait_on_recovery;
529 case -NFS4ERR_LEASE_MOVED:
530 nfs4_schedule_lease_moved_recovery(clp);
531 goto wait_on_recovery;
532 #if defined(CONFIG_NFS_V4_1)
533 case -NFS4ERR_BADSESSION:
534 case -NFS4ERR_BADSLOT:
535 case -NFS4ERR_BAD_HIGH_SLOT:
536 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
537 case -NFS4ERR_DEADSESSION:
538 case -NFS4ERR_SEQ_FALSE_RETRY:
539 case -NFS4ERR_SEQ_MISORDERED:
540 /* Handled in nfs41_sequence_process() */
541 goto wait_on_recovery;
542 #endif /* defined(CONFIG_NFS_V4_1) */
543 case -NFS4ERR_FILE_OPEN:
544 if (exception->timeout > HZ) {
545 /* We have retried a decent amount, time to
546 * fail
547 */
548 ret = -EBUSY;
549 break;
550 }
551 fallthrough;
552 case -NFS4ERR_DELAY:
553 nfs_inc_server_stats(server, NFSIOS_DELAY);
554 fallthrough;
555 case -NFS4ERR_GRACE:
556 case -NFS4ERR_LAYOUTTRYLATER:
557 case -NFS4ERR_RECALLCONFLICT:
558 exception->delay = 1;
559 return 0;
560
561 case -NFS4ERR_RETRY_UNCACHED_REP:
562 case -NFS4ERR_OLD_STATEID:
563 exception->retry = 1;
564 break;
565 case -NFS4ERR_BADOWNER:
566 /* The following works around a Linux server bug! */
567 case -NFS4ERR_BADNAME:
568 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
569 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
570 exception->retry = 1;
571 printk(KERN_WARNING "NFS: v4 server %s "
572 "does not accept raw "
573 "uid/gids. "
574 "Reenabling the idmapper.\n",
575 server->nfs_client->cl_hostname);
576 }
577 }
578 /* We failed to handle the error */
579 return nfs4_map_errors(ret);
580 wait_on_recovery:
581 exception->recovering = 1;
582 return 0;
583 }
584
585 /* This is the error handling routine for processes that are allowed
586 * to sleep.
587 */
588 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
589 {
590 struct nfs_client *clp = server->nfs_client;
591 int ret;
592
593 ret = nfs4_do_handle_exception(server, errorcode, exception);
594 if (exception->delay) {
595 ret = nfs4_delay(&exception->timeout,
596 exception->interruptible);
597 goto out_retry;
598 }
599 if (exception->recovering) {
600 ret = nfs4_wait_clnt_recover(clp);
601 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
602 return -EIO;
603 goto out_retry;
604 }
605 return ret;
606 out_retry:
607 if (ret == 0)
608 exception->retry = 1;
609 return ret;
610 }
611
612 static int
613 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
614 int errorcode, struct nfs4_exception *exception)
615 {
616 struct nfs_client *clp = server->nfs_client;
617 int ret;
618
619 ret = nfs4_do_handle_exception(server, errorcode, exception);
620 if (exception->delay) {
621 rpc_delay(task, nfs4_update_delay(&exception->timeout));
622 goto out_retry;
623 }
624 if (exception->recovering) {
625 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
626 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
627 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
628 goto out_retry;
629 }
630 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
631 ret = -EIO;
632 return ret;
633 out_retry:
634 if (ret == 0) {
635 exception->retry = 1;
636 /*
637 * For NFS4ERR_MOVED, the client transport will need to
638 * be recomputed after migration recovery has completed.
639 */
640 if (errorcode == -NFS4ERR_MOVED)
641 rpc_task_release_transport(task);
642 }
643 return ret;
644 }
645
646 int
647 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
648 struct nfs4_state *state, long *timeout)
649 {
650 struct nfs4_exception exception = {
651 .state = state,
652 };
653
654 if (task->tk_status >= 0)
655 return 0;
656 if (timeout)
657 exception.timeout = *timeout;
658 task->tk_status = nfs4_async_handle_exception(task, server,
659 task->tk_status,
660 &exception);
661 if (exception.delay && timeout)
662 *timeout = exception.timeout;
663 if (exception.retry)
664 return -EAGAIN;
665 return 0;
666 }
667
668 /*
669 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
670 * or 'false' otherwise.
671 */
672 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
673 {
674 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
675 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
676 }
677
678 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
679 {
680 spin_lock(&clp->cl_lock);
681 if (time_before(clp->cl_last_renewal,timestamp))
682 clp->cl_last_renewal = timestamp;
683 spin_unlock(&clp->cl_lock);
684 }
685
686 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
687 {
688 struct nfs_client *clp = server->nfs_client;
689
690 if (!nfs4_has_session(clp))
691 do_renew_lease(clp, timestamp);
692 }
693
694 struct nfs4_call_sync_data {
695 const struct nfs_server *seq_server;
696 struct nfs4_sequence_args *seq_args;
697 struct nfs4_sequence_res *seq_res;
698 };
699
700 void nfs4_init_sequence(struct nfs4_sequence_args *args,
701 struct nfs4_sequence_res *res, int cache_reply,
702 int privileged)
703 {
704 args->sa_slot = NULL;
705 args->sa_cache_this = cache_reply;
706 args->sa_privileged = privileged;
707
708 res->sr_slot = NULL;
709 }
710
711 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
712 {
713 struct nfs4_slot *slot = res->sr_slot;
714 struct nfs4_slot_table *tbl;
715
716 tbl = slot->table;
717 spin_lock(&tbl->slot_tbl_lock);
718 if (!nfs41_wake_and_assign_slot(tbl, slot))
719 nfs4_free_slot(tbl, slot);
720 spin_unlock(&tbl->slot_tbl_lock);
721
722 res->sr_slot = NULL;
723 }
724
725 static int nfs40_sequence_done(struct rpc_task *task,
726 struct nfs4_sequence_res *res)
727 {
728 if (res->sr_slot != NULL)
729 nfs40_sequence_free_slot(res);
730 return 1;
731 }
732
733 #if defined(CONFIG_NFS_V4_1)
734
735 static void nfs41_release_slot(struct nfs4_slot *slot)
736 {
737 struct nfs4_session *session;
738 struct nfs4_slot_table *tbl;
739 bool send_new_highest_used_slotid = false;
740
741 if (!slot)
742 return;
743 tbl = slot->table;
744 session = tbl->session;
745
746 /* Bump the slot sequence number */
747 if (slot->seq_done)
748 slot->seq_nr++;
749 slot->seq_done = 0;
750
751 spin_lock(&tbl->slot_tbl_lock);
752 /* Be nice to the server: try to ensure that the last transmitted
753 * value for highest_used_slotid <= target_highest_slotid
754 */
755 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
756 send_new_highest_used_slotid = true;
757
758 if (nfs41_wake_and_assign_slot(tbl, slot)) {
759 send_new_highest_used_slotid = false;
760 goto out_unlock;
761 }
762 nfs4_free_slot(tbl, slot);
763
764 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
765 send_new_highest_used_slotid = false;
766 out_unlock:
767 spin_unlock(&tbl->slot_tbl_lock);
768 if (send_new_highest_used_slotid)
769 nfs41_notify_server(session->clp);
770 if (waitqueue_active(&tbl->slot_waitq))
771 wake_up_all(&tbl->slot_waitq);
772 }
773
774 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
775 {
776 nfs41_release_slot(res->sr_slot);
777 res->sr_slot = NULL;
778 }
779
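/*
 * The signed 32-bit difference below makes the "newer than" comparison
 * safe across sequence number wraparound.
 */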
780 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
781 u32 seqnr)
782 {
783 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
784 slot->seq_nr_highest_sent = seqnr;
785 }
786 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
787 u32 seqnr)
788 {
789 slot->seq_nr_highest_sent = seqnr;
790 slot->seq_nr_last_acked = seqnr;
791 }
792
793 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
794 struct nfs4_slot *slot)
795 {
796 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
797 if (!IS_ERR(task))
798 rpc_put_task_async(task);
799 }
800
801 static int nfs41_sequence_process(struct rpc_task *task,
802 struct nfs4_sequence_res *res)
803 {
804 struct nfs4_session *session;
805 struct nfs4_slot *slot = res->sr_slot;
806 struct nfs_client *clp;
807 int status;
808 int ret = 1;
809
810 if (slot == NULL)
811 goto out_noaction;
812 /* don't increment the sequence number if the task wasn't sent */
813 if (!RPC_WAS_SENT(task) || slot->seq_done)
814 goto out;
815
816 session = slot->table->session;
817 clp = session->clp;
818
819 trace_nfs4_sequence_done(session, res);
820
821 status = res->sr_status;
822 if (task->tk_status == -NFS4ERR_DEADSESSION)
823 status = -NFS4ERR_DEADSESSION;
824
825 /* Check the SEQUENCE operation status */
826 switch (status) {
827 case 0:
828 /* Mark this sequence number as having been acked */
829 nfs4_slot_sequence_acked(slot, slot->seq_nr);
830 /* Update the slot's sequence and clientid lease timer */
831 slot->seq_done = 1;
832 do_renew_lease(clp, res->sr_timestamp);
833 /* Check sequence flags */
834 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
835 !!slot->privileged);
836 nfs41_update_target_slotid(slot->table, slot, res);
837 break;
838 case 1:
839 /*
840 * sr_status remains 1 if an RPC level error occurred.
841 * The server may or may not have processed the sequence
842 * operation.
843 */
844 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
845 slot->seq_done = 1;
846 goto out;
847 case -NFS4ERR_DELAY:
848 /* The server detected a resend of the RPC call and
849 * returned NFS4ERR_DELAY as per Section 2.10.6.2
850 * of RFC5661.
851 */
852 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
853 __func__,
854 slot->slot_nr,
855 slot->seq_nr);
856 nfs4_slot_sequence_acked(slot, slot->seq_nr);
857 goto out_retry;
858 case -NFS4ERR_RETRY_UNCACHED_REP:
859 case -NFS4ERR_SEQ_FALSE_RETRY:
860 /*
861 * The server thinks we tried to replay a request.
862 * Retry the call after bumping the sequence ID.
863 */
864 nfs4_slot_sequence_acked(slot, slot->seq_nr);
865 goto retry_new_seq;
866 case -NFS4ERR_BADSLOT:
867 /*
868 * The slot id we used was probably retired. Try again
869 * using a different slot id.
870 */
871 if (slot->slot_nr < slot->table->target_highest_slotid)
872 goto session_recover;
873 goto retry_nowait;
874 case -NFS4ERR_SEQ_MISORDERED:
875 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
876 /*
877 * Were one or more calls using this slot interrupted?
878 * If the server never received the request, then our
879 * transmitted slot sequence number may be too high. However,
880 * if the server did receive the request then it might
881 * accidentally give us a reply with a mismatched operation.
882 * We can sort this out by sending a lone sequence operation
883 * to the server on the same slot.
884 */
885 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
886 slot->seq_nr--;
887 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
888 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
889 res->sr_slot = NULL;
890 }
891 goto retry_nowait;
892 }
893 /*
894 * RFC5661:
895 * A retry might be sent while the original request is
896 * still in progress on the replier. The replier SHOULD
897 * deal with the issue by returning NFS4ERR_DELAY as the
898 * reply to SEQUENCE or CB_SEQUENCE operation, but
899 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
900 *
901 * Restart the search after a delay.
902 */
903 slot->seq_nr = slot->seq_nr_highest_sent;
904 goto out_retry;
905 case -NFS4ERR_BADSESSION:
906 case -NFS4ERR_DEADSESSION:
907 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
908 goto session_recover;
909 default:
910 /* Just update the slot sequence no. */
911 slot->seq_done = 1;
912 }
913 out:
914 /* The session may be reset by one of the error handlers. */
915 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
916 out_noaction:
917 return ret;
918 session_recover:
919 nfs4_schedule_session_recovery(session, status);
920 dprintk("%s ERROR: %d Reset session\n", __func__, status);
921 nfs41_sequence_free_slot(res);
922 goto out;
923 retry_new_seq:
924 ++slot->seq_nr;
925 retry_nowait:
926 if (rpc_restart_call_prepare(task)) {
927 nfs41_sequence_free_slot(res);
928 task->tk_status = 0;
929 ret = 0;
930 }
931 goto out;
932 out_retry:
933 if (!rpc_restart_call(task))
934 goto out;
935 rpc_delay(task, NFS4_POLL_RETRY_MAX);
936 return 0;
937 }
938
939 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
940 {
941 if (!nfs41_sequence_process(task, res))
942 return 0;
943 if (res->sr_slot != NULL)
944 nfs41_sequence_free_slot(res);
945 return 1;
946
947 }
948 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
949
950 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
951 {
952 if (res->sr_slot == NULL)
953 return 1;
954 if (res->sr_slot->table->session != NULL)
955 return nfs41_sequence_process(task, res);
956 return nfs40_sequence_done(task, res);
957 }
958
959 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
960 {
961 if (res->sr_slot != NULL) {
962 if (res->sr_slot->table->session != NULL)
963 nfs41_sequence_free_slot(res);
964 else
965 nfs40_sequence_free_slot(res);
966 }
967 }
968
969 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
970 {
971 if (res->sr_slot == NULL)
972 return 1;
973 if (!res->sr_slot->table->session)
974 return nfs40_sequence_done(task, res);
975 return nfs41_sequence_done(task, res);
976 }
977 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
978
979 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
980 {
981 struct nfs4_call_sync_data *data = calldata;
982
983 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
984
985 nfs4_setup_sequence(data->seq_server->nfs_client,
986 data->seq_args, data->seq_res, task);
987 }
988
989 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
990 {
991 struct nfs4_call_sync_data *data = calldata;
992
993 nfs41_sequence_done(task, data->seq_res);
994 }
995
996 static const struct rpc_call_ops nfs41_call_sync_ops = {
997 .rpc_call_prepare = nfs41_call_sync_prepare,
998 .rpc_call_done = nfs41_call_sync_done,
999 };
1000
1001 #else /* !CONFIG_NFS_V4_1 */
1002
1003 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1004 {
1005 return nfs40_sequence_done(task, res);
1006 }
1007
1008 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1009 {
1010 if (res->sr_slot != NULL)
1011 nfs40_sequence_free_slot(res);
1012 }
1013
1014 int nfs4_sequence_done(struct rpc_task *task,
1015 struct nfs4_sequence_res *res)
1016 {
1017 return nfs40_sequence_done(task, res);
1018 }
1019 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1020
1021 #endif /* !CONFIG_NFS_V4_1 */
1022
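/*
 * Prime the SEQUENCE result before transmission: sr_status starts at 1
 * so that an RPC-level failure (no SEQUENCE reply processed) can be
 * told apart from a real NFS4_OK/NFS4ERR_* status in
 * nfs41_sequence_process().
 */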
1023 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1024 {
1025 res->sr_timestamp = jiffies;
1026 res->sr_status_flags = 0;
1027 res->sr_status = 1;
1028 }
1029
1030 static
1031 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1032 struct nfs4_sequence_res *res,
1033 struct nfs4_slot *slot)
1034 {
1035 if (!slot)
1036 return;
1037 slot->privileged = args->sa_privileged ? 1 : 0;
1038 args->sa_slot = slot;
1039
1040 res->sr_slot = slot;
1041 }
1042
1043 int nfs4_setup_sequence(struct nfs_client *client,
1044 struct nfs4_sequence_args *args,
1045 struct nfs4_sequence_res *res,
1046 struct rpc_task *task)
1047 {
1048 struct nfs4_session *session = nfs4_get_session(client);
1049 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
1050 struct nfs4_slot *slot;
1051
1052 /* slot already allocated? */
1053 if (res->sr_slot != NULL)
1054 goto out_start;
1055
1056 if (session)
1057 tbl = &session->fc_slot_table;
1058
1059 spin_lock(&tbl->slot_tbl_lock);
1060 /* The state manager will wait until the slot table is empty */
1061 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1062 goto out_sleep;
1063
1064 slot = nfs4_alloc_slot(tbl);
1065 if (IS_ERR(slot)) {
1066 if (slot == ERR_PTR(-ENOMEM))
1067 goto out_sleep_timeout;
1068 goto out_sleep;
1069 }
1070 spin_unlock(&tbl->slot_tbl_lock);
1071
1072 nfs4_sequence_attach_slot(args, res, slot);
1073
1074 trace_nfs4_setup_sequence(session, args);
1075 out_start:
1076 nfs41_sequence_res_init(res);
1077 rpc_call_start(task);
1078 return 0;
1079 out_sleep_timeout:
1080 /* Try again in 1/4 second */
1081 if (args->sa_privileged)
1082 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1083 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1084 else
1085 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1086 NULL, jiffies + (HZ >> 2));
1087 spin_unlock(&tbl->slot_tbl_lock);
1088 return -EAGAIN;
1089 out_sleep:
1090 if (args->sa_privileged)
1091 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1092 RPC_PRIORITY_PRIVILEGED);
1093 else
1094 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1095 spin_unlock(&tbl->slot_tbl_lock);
1096 return -EAGAIN;
1097 }
1098 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1099
1100 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1101 {
1102 struct nfs4_call_sync_data *data = calldata;
1103 nfs4_setup_sequence(data->seq_server->nfs_client,
1104 data->seq_args, data->seq_res, task);
1105 }
1106
1107 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1108 {
1109 struct nfs4_call_sync_data *data = calldata;
1110 nfs4_sequence_done(task, data->seq_res);
1111 }
1112
1113 static const struct rpc_call_ops nfs40_call_sync_ops = {
1114 .rpc_call_prepare = nfs40_call_sync_prepare,
1115 .rpc_call_done = nfs40_call_sync_done,
1116 };
1117
1118 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1119 {
1120 int ret;
1121 struct rpc_task *task;
1122
1123 task = rpc_run_task(task_setup);
1124 if (IS_ERR(task))
1125 return PTR_ERR(task);
1126
1127 ret = task->tk_status;
1128 rpc_put_task(task);
1129 return ret;
1130 }
1131
1132 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1133 struct nfs_server *server,
1134 struct rpc_message *msg,
1135 struct nfs4_sequence_args *args,
1136 struct nfs4_sequence_res *res,
1137 unsigned short task_flags)
1138 {
1139 struct nfs_client *clp = server->nfs_client;
1140 struct nfs4_call_sync_data data = {
1141 .seq_server = server,
1142 .seq_args = args,
1143 .seq_res = res,
1144 };
1145 struct rpc_task_setup task_setup = {
1146 .rpc_client = clnt,
1147 .rpc_message = msg,
1148 .callback_ops = clp->cl_mvops->call_sync_ops,
1149 .callback_data = &data,
1150 .flags = task_flags,
1151 };
1152
1153 return nfs4_call_sync_custom(&task_setup);
1154 }
1155
1156 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1157 struct nfs_server *server,
1158 struct rpc_message *msg,
1159 struct nfs4_sequence_args *args,
1160 struct nfs4_sequence_res *res)
1161 {
1162 return nfs4_do_call_sync(clnt, server, msg, args, res, 0);
1163 }
1164
1165
1166 int nfs4_call_sync(struct rpc_clnt *clnt,
1167 struct nfs_server *server,
1168 struct rpc_message *msg,
1169 struct nfs4_sequence_args *args,
1170 struct nfs4_sequence_res *res,
1171 int cache_reply)
1172 {
1173 nfs4_init_sequence(args, res, cache_reply, 0);
1174 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1175 }
1176
1177 static void
1178 nfs4_inc_nlink_locked(struct inode *inode)
1179 {
1180 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1181 inc_nlink(inode);
1182 }
1183
1184 static void
1185 nfs4_dec_nlink_locked(struct inode *inode)
1186 {
1187 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1188 drop_nlink(inode);
1189 }
1190
1191 static void
1192 nfs4_update_changeattr_locked(struct inode *inode,
1193 struct nfs4_change_info *cinfo,
1194 unsigned long timestamp, unsigned long cache_validity)
1195 {
1196 struct nfs_inode *nfsi = NFS_I(inode);
1197
1198 nfsi->cache_validity |= NFS_INO_INVALID_CTIME
1199 | NFS_INO_INVALID_MTIME
1200 | cache_validity;
1201
1202 if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(inode)) {
1203 nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
1204 nfsi->attrtimeo_timestamp = jiffies;
1205 } else {
1206 if (S_ISDIR(inode->i_mode)) {
1207 nfsi->cache_validity |= NFS_INO_INVALID_DATA;
1208 nfs_force_lookup_revalidate(inode);
1209 } else {
1210 if (!NFS_PROTO(inode)->have_delegation(inode,
1211 FMODE_READ))
1212 nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
1213 }
1214
1215 if (cinfo->before != inode_peek_iversion_raw(inode))
1216 nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
1217 NFS_INO_INVALID_ACL |
1218 NFS_INO_INVALID_XATTR;
1219 }
1220 inode_set_iversion_raw(inode, cinfo->after);
1221 nfsi->read_cache_jiffies = timestamp;
1222 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1223 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1224
1225 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1226 nfs_fscache_invalidate(inode);
1227 }
1228
1229 void
1230 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1231 unsigned long timestamp, unsigned long cache_validity)
1232 {
1233 spin_lock(&dir->i_lock);
1234 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1235 spin_unlock(&dir->i_lock);
1236 }
1237
1238 struct nfs4_open_createattrs {
1239 struct nfs4_label *label;
1240 struct iattr *sattr;
1241 const __u32 verf[2];
1242 };
1243
1244 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1245 int err, struct nfs4_exception *exception)
1246 {
1247 if (err != -EINVAL)
1248 return false;
1249 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1250 return false;
1251 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1252 exception->retry = 1;
1253 return true;
1254 }
1255
1256 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1257 {
1258 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1259 }
1260
1261 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1262 {
1263 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1264
1265 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1266 }
1267
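/*
 * Translate the VFS fmode and open flags into the NFSv4 share_access
 * bits sent in the OPEN request. The WANT_NO_DELEG hint is only added
 * for servers supporting the v4.1 atomic open semantics.
 */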
1268 static u32
1269 nfs4_map_atomic_open_share(struct nfs_server *server,
1270 fmode_t fmode, int openflags)
1271 {
1272 u32 res = 0;
1273
1274 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1275 case FMODE_READ:
1276 res = NFS4_SHARE_ACCESS_READ;
1277 break;
1278 case FMODE_WRITE:
1279 res = NFS4_SHARE_ACCESS_WRITE;
1280 break;
1281 case FMODE_READ|FMODE_WRITE:
1282 res = NFS4_SHARE_ACCESS_BOTH;
1283 }
1284 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1285 goto out;
1286 /* Want no delegation if we're using O_DIRECT */
1287 if (openflags & O_DIRECT)
1288 res |= NFS4_SHARE_WANT_NO_DELEG;
1289 out:
1290 return res;
1291 }
1292
1293 static enum open_claim_type4
1294 nfs4_map_atomic_open_claim(struct nfs_server *server,
1295 enum open_claim_type4 claim)
1296 {
1297 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1298 return claim;
1299 switch (claim) {
1300 default:
1301 return claim;
1302 case NFS4_OPEN_CLAIM_FH:
1303 return NFS4_OPEN_CLAIM_NULL;
1304 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1305 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1306 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1307 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1308 }
1309 }
1310
1311 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1312 {
1313 p->o_res.f_attr = &p->f_attr;
1314 p->o_res.f_label = p->f_label;
1315 p->o_res.seqid = p->o_arg.seqid;
1316 p->c_res.seqid = p->c_arg.seqid;
1317 p->o_res.server = p->o_arg.server;
1318 p->o_res.access_request = p->o_arg.access;
1319 nfs_fattr_init(&p->f_attr);
1320 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1321 }
1322
1323 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1324 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1325 const struct nfs4_open_createattrs *c,
1326 enum open_claim_type4 claim,
1327 gfp_t gfp_mask)
1328 {
1329 struct dentry *parent = dget_parent(dentry);
1330 struct inode *dir = d_inode(parent);
1331 struct nfs_server *server = NFS_SERVER(dir);
1332 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1333 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1334 struct nfs4_opendata *p;
1335
1336 p = kzalloc(sizeof(*p), gfp_mask);
1337 if (p == NULL)
1338 goto err;
1339
1340 p->f_label = nfs4_label_alloc(server, gfp_mask);
1341 if (IS_ERR(p->f_label))
1342 goto err_free_p;
1343
1344 p->a_label = nfs4_label_alloc(server, gfp_mask);
1345 if (IS_ERR(p->a_label))
1346 goto err_free_f;
1347
1348 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1349 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1350 if (IS_ERR(p->o_arg.seqid))
1351 goto err_free_label;
1352 nfs_sb_active(dentry->d_sb);
1353 p->dentry = dget(dentry);
1354 p->dir = parent;
1355 p->owner = sp;
1356 atomic_inc(&sp->so_count);
1357 p->o_arg.open_flags = flags;
1358 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1359 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1360 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1361 fmode, flags);
1362 if (flags & O_CREAT) {
1363 p->o_arg.umask = current_umask();
1364 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1365 if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1366 p->o_arg.u.attrs = &p->attrs;
1367 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1368
1369 memcpy(p->o_arg.u.verifier.data, c->verf,
1370 sizeof(p->o_arg.u.verifier.data));
1371 }
1372 }
1373 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1374 * will return permission denied for all bits until close */
1375 if (!(flags & O_EXCL)) {
1376 /* ask server to check for all possible rights as results
1377 * are cached */
1378 switch (p->o_arg.claim) {
1379 default:
1380 break;
1381 case NFS4_OPEN_CLAIM_NULL:
1382 case NFS4_OPEN_CLAIM_FH:
1383 p->o_arg.access = NFS4_ACCESS_READ |
1384 NFS4_ACCESS_MODIFY |
1385 NFS4_ACCESS_EXTEND |
1386 NFS4_ACCESS_EXECUTE;
1387 #ifdef CONFIG_NFS_V4_2
1388 if (server->caps & NFS_CAP_XATTR)
1389 p->o_arg.access |= NFS4_ACCESS_XAREAD |
1390 NFS4_ACCESS_XAWRITE |
1391 NFS4_ACCESS_XALIST;
1392 #endif
1393 }
1394 }
1395 p->o_arg.clientid = server->nfs_client->cl_clientid;
1396 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1397 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1398 p->o_arg.name = &dentry->d_name;
1399 p->o_arg.server = server;
1400 p->o_arg.bitmask = nfs4_bitmask(server, label);
1401 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1402 switch (p->o_arg.claim) {
1403 case NFS4_OPEN_CLAIM_NULL:
1404 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1405 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1406 p->o_arg.fh = NFS_FH(dir);
1407 break;
1408 case NFS4_OPEN_CLAIM_PREVIOUS:
1409 case NFS4_OPEN_CLAIM_FH:
1410 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1411 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1412 p->o_arg.fh = NFS_FH(d_inode(dentry));
1413 }
1414 p->c_arg.fh = &p->o_res.fh;
1415 p->c_arg.stateid = &p->o_res.stateid;
1416 p->c_arg.seqid = p->o_arg.seqid;
1417 nfs4_init_opendata_res(p);
1418 kref_init(&p->kref);
1419 return p;
1420
1421 err_free_label:
1422 nfs4_label_free(p->a_label);
1423 err_free_f:
1424 nfs4_label_free(p->f_label);
1425 err_free_p:
1426 kfree(p);
1427 err:
1428 dput(parent);
1429 return NULL;
1430 }
1431
1432 static void nfs4_opendata_free(struct kref *kref)
1433 {
1434 struct nfs4_opendata *p = container_of(kref,
1435 struct nfs4_opendata, kref);
1436 struct super_block *sb = p->dentry->d_sb;
1437
1438 nfs4_lgopen_release(p->lgp);
1439 nfs_free_seqid(p->o_arg.seqid);
1440 nfs4_sequence_free_slot(&p->o_res.seq_res);
1441 if (p->state != NULL)
1442 nfs4_put_open_state(p->state);
1443 nfs4_put_state_owner(p->owner);
1444
1445 nfs4_label_free(p->a_label);
1446 nfs4_label_free(p->f_label);
1447
1448 dput(p->dir);
1449 dput(p->dentry);
1450 nfs_sb_deactive(sb);
1451 nfs_fattr_free_names(&p->f_attr);
1452 kfree(p->f_attr.mdsthreshold);
1453 kfree(p);
1454 }
1455
1456 static void nfs4_opendata_put(struct nfs4_opendata *p)
1457 {
1458 if (p != NULL)
1459 kref_put(&p->kref, nfs4_opendata_free);
1460 }
1461
1462 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1463 fmode_t fmode)
1464 {
1465 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1466 case FMODE_READ|FMODE_WRITE:
1467 return state->n_rdwr != 0;
1468 case FMODE_WRITE:
1469 return state->n_wronly != 0;
1470 case FMODE_READ:
1471 return state->n_rdonly != 0;
1472 }
1473 WARN_ON_ONCE(1);
1474 return false;
1475 }
1476
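/*
 * Return non-zero when the requested open mode is already covered by an
 * existing open stateid, so no new OPEN RPC is needed. O_EXCL/O_TRUNC
 * opens and ordinary CLAIM_NULL/CLAIM_FH opens always return zero.
 */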
1477 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1478 int open_mode, enum open_claim_type4 claim)
1479 {
1480 int ret = 0;
1481
1482 if (open_mode & (O_EXCL|O_TRUNC))
1483 goto out;
1484 switch (claim) {
1485 case NFS4_OPEN_CLAIM_NULL:
1486 case NFS4_OPEN_CLAIM_FH:
1487 goto out;
1488 default:
1489 break;
1490 }
1491 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1492 case FMODE_READ:
1493 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1494 && state->n_rdonly != 0;
1495 break;
1496 case FMODE_WRITE:
1497 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1498 && state->n_wronly != 0;
1499 break;
1500 case FMODE_READ|FMODE_WRITE:
1501 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1502 && state->n_rdwr != 0;
1503 }
1504 out:
1505 return ret;
1506 }
1507
1508 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1509 enum open_claim_type4 claim)
1510 {
1511 if (delegation == NULL)
1512 return 0;
1513 if ((delegation->type & fmode) != fmode)
1514 return 0;
1515 switch (claim) {
1516 case NFS4_OPEN_CLAIM_NULL:
1517 case NFS4_OPEN_CLAIM_FH:
1518 break;
1519 case NFS4_OPEN_CLAIM_PREVIOUS:
1520 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1521 break;
1522 fallthrough;
1523 default:
1524 return 0;
1525 }
1526 nfs_mark_delegation_referenced(delegation);
1527 return 1;
1528 }
1529
1530 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1531 {
1532 switch (fmode) {
1533 case FMODE_WRITE:
1534 state->n_wronly++;
1535 break;
1536 case FMODE_READ:
1537 state->n_rdonly++;
1538 break;
1539 case FMODE_READ|FMODE_WRITE:
1540 state->n_rdwr++;
1541 }
1542 nfs4_state_set_mode_locked(state, state->state | fmode);
1543 }
1544
1545 #ifdef CONFIG_NFS_V4_1
1546 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1547 {
1548 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1549 return true;
1550 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1551 return true;
1552 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1553 return true;
1554 return false;
1555 }
1556 #endif /* CONFIG_NFS_V4_1 */
1557
1558 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1559 {
1560 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1561 wake_up_all(&state->waitq);
1562 }
1563
1564 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1565 {
1566 struct nfs_client *clp = state->owner->so_server->nfs_client;
1567 bool need_recover = false;
1568
1569 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1570 need_recover = true;
1571 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1572 need_recover = true;
1573 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1574 need_recover = true;
1575 if (need_recover)
1576 nfs4_state_mark_reclaim_nograce(clp, state);
1577 }
1578
1579 /*
1580 * Check whether the caller may update the open stateid
1581 * to the value passed in by stateid.
1582 *
1583 * Note: This function relies heavily on the server implementing
1584 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1585 * correctly.
1586 * i.e. The stateid seqids have to be initialised to 1, and
1587 * are then incremented on every state transition.
1588 */
1589 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1590 const nfs4_stateid *stateid)
1591 {
1592 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1593 /* The common case - we're updating to a new sequence number */
1594 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1595 nfs4_stateid_is_next(&state->open_stateid, stateid)) {
1596 return true;
1597 }
1598 } else {
1599 /* This is the first OPEN in this generation */
1600 if (stateid->seqid == cpu_to_be32(1))
1601 return true;
1602 }
1603 return false;
1604 }
1605
1606 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1607 {
1608 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1609 return;
1610 if (state->n_wronly)
1611 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1612 if (state->n_rdonly)
1613 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1614 if (state->n_rdwr)
1615 set_bit(NFS_O_RDWR_STATE, &state->flags);
1616 set_bit(NFS_OPEN_STATE, &state->flags);
1617 }
1618
1619 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1620 nfs4_stateid *stateid, fmode_t fmode)
1621 {
1622 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1623 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1624 case FMODE_WRITE:
1625 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1626 break;
1627 case FMODE_READ:
1628 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1629 break;
1630 case 0:
1631 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1632 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1633 clear_bit(NFS_OPEN_STATE, &state->flags);
1634 }
1635 if (stateid == NULL)
1636 return;
1637 /* Handle OPEN+OPEN_DOWNGRADE races */
1638 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1639 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1640 nfs_resync_open_stateid_locked(state);
1641 goto out;
1642 }
1643 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1644 nfs4_stateid_copy(&state->stateid, stateid);
1645 nfs4_stateid_copy(&state->open_stateid, stateid);
1646 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1647 out:
1648 nfs_state_log_update_open_stateid(state);
1649 }
1650
1651 static void nfs_clear_open_stateid(struct nfs4_state *state,
1652 nfs4_stateid *arg_stateid,
1653 nfs4_stateid *stateid, fmode_t fmode)
1654 {
1655 write_seqlock(&state->seqlock);
1656 /* Ignore if the CLOSE argument doesn't match the current stateid */
1657 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1658 nfs_clear_open_stateid_locked(state, stateid, fmode);
1659 write_sequnlock(&state->seqlock);
1660 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1661 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1662 }
1663
1664 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1665 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1666 __must_hold(&state->owner->so_lock)
1667 __must_hold(&state->seqlock)
1668 __must_hold(RCU)
1669
1670 {
1671 DEFINE_WAIT(wait);
1672 int status = 0;
1673 for (;;) {
1674
1675 if (nfs_stateid_is_sequential(state, stateid))
1676 break;
1677
1678 if (status)
1679 break;
1680 /* Rely on seqids for serialisation with NFSv4.0 */
1681 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1682 break;
1683
1684 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1685 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1686 /*
1687 * Ensure we process the state changes in the same order
1688 * in which the server processed them by delaying the
1689 * update of the stateid until we are in sequence.
1690 */
1691 write_sequnlock(&state->seqlock);
1692 spin_unlock(&state->owner->so_lock);
1693 rcu_read_unlock();
1694 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1695
1696 if (!signal_pending(current)) {
1697 if (schedule_timeout(5*HZ) == 0)
1698 status = -EAGAIN;
1699 else
1700 status = 0;
1701 } else
1702 status = -EINTR;
1703 finish_wait(&state->waitq, &wait);
1704 rcu_read_lock();
1705 spin_lock(&state->owner->so_lock);
1706 write_seqlock(&state->seqlock);
1707 }
1708
1709 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1710 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1711 nfs4_stateid_copy(freeme, &state->open_stateid);
1712 nfs_test_and_clear_all_open_stateid(state);
1713 }
1714
1715 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1716 nfs4_stateid_copy(&state->stateid, stateid);
1717 nfs4_stateid_copy(&state->open_stateid, stateid);
1718 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1719 nfs_state_log_update_open_stateid(state);
1720 }
1721
1722 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1723 const nfs4_stateid *open_stateid,
1724 fmode_t fmode,
1725 nfs4_stateid *freeme)
1726 {
1727 /*
1728 * Protect the call to nfs4_state_set_mode_locked and
1729 * serialise the stateid update
1730 */
1731 write_seqlock(&state->seqlock);
1732 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1733 switch (fmode) {
1734 case FMODE_READ:
1735 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1736 break;
1737 case FMODE_WRITE:
1738 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1739 break;
1740 case FMODE_READ|FMODE_WRITE:
1741 set_bit(NFS_O_RDWR_STATE, &state->flags);
1742 }
1743 set_bit(NFS_OPEN_STATE, &state->flags);
1744 write_sequnlock(&state->seqlock);
1745 }
1746
1747 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1748 {
1749 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1750 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1751 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1752 clear_bit(NFS_OPEN_STATE, &state->flags);
1753 }
1754
1755 static void nfs_state_set_delegation(struct nfs4_state *state,
1756 const nfs4_stateid *deleg_stateid,
1757 fmode_t fmode)
1758 {
1759 /*
1760 * Protect the call to nfs4_state_set_mode_locked and
1761 * serialise the stateid update
1762 */
1763 write_seqlock(&state->seqlock);
1764 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1765 set_bit(NFS_DELEGATED_STATE, &state->flags);
1766 write_sequnlock(&state->seqlock);
1767 }
1768
1769 static void nfs_state_clear_delegation(struct nfs4_state *state)
1770 {
1771 write_seqlock(&state->seqlock);
1772 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1773 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1774 write_sequnlock(&state->seqlock);
1775 }
1776
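/*
 * Record a new open and/or delegation stateid returned by the server,
 * bump the open mode counters, and trigger recovery or a free of any
 * superseded stateid where needed. Returns 1 if a stateid was updated.
 */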
1777 int update_open_stateid(struct nfs4_state *state,
1778 const nfs4_stateid *open_stateid,
1779 const nfs4_stateid *delegation,
1780 fmode_t fmode)
1781 {
1782 struct nfs_server *server = NFS_SERVER(state->inode);
1783 struct nfs_client *clp = server->nfs_client;
1784 struct nfs_inode *nfsi = NFS_I(state->inode);
1785 struct nfs_delegation *deleg_cur;
1786 nfs4_stateid freeme = { };
1787 int ret = 0;
1788
1789 fmode &= (FMODE_READ|FMODE_WRITE);
1790
1791 rcu_read_lock();
1792 spin_lock(&state->owner->so_lock);
1793 if (open_stateid != NULL) {
1794 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1795 ret = 1;
1796 }
1797
1798 deleg_cur = nfs4_get_valid_delegation(state->inode);
1799 if (deleg_cur == NULL)
1800 goto no_delegation;
1801
1802 spin_lock(&deleg_cur->lock);
1803 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1804 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1805 (deleg_cur->type & fmode) != fmode)
1806 goto no_delegation_unlock;
1807
1808 if (delegation == NULL)
1809 delegation = &deleg_cur->stateid;
1810 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1811 goto no_delegation_unlock;
1812
1813 nfs_mark_delegation_referenced(deleg_cur);
1814 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1815 ret = 1;
1816 no_delegation_unlock:
1817 spin_unlock(&deleg_cur->lock);
1818 no_delegation:
1819 if (ret)
1820 update_open_stateflags(state, fmode);
1821 spin_unlock(&state->owner->so_lock);
1822 rcu_read_unlock();
1823
1824 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1825 nfs4_schedule_state_manager(clp);
1826 if (freeme.type != 0)
1827 nfs4_test_and_free_stateid(server, &freeme,
1828 state->owner->so_cred);
1829
1830 return ret;
1831 }
1832
1833 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1834 const nfs4_stateid *stateid)
1835 {
1836 struct nfs4_state *state = lsp->ls_state;
1837 bool ret = false;
1838
1839 spin_lock(&state->state_lock);
1840 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1841 goto out_noupdate;
1842 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1843 goto out_noupdate;
1844 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1845 ret = true;
1846 out_noupdate:
1847 spin_unlock(&state->state_lock);
1848 return ret;
1849 }
1850
1851 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1852 {
1853 struct nfs_delegation *delegation;
1854
1855 fmode &= FMODE_READ|FMODE_WRITE;
1856 rcu_read_lock();
1857 delegation = nfs4_get_valid_delegation(inode);
1858 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1859 rcu_read_unlock();
1860 return;
1861 }
1862 rcu_read_unlock();
1863 nfs4_inode_return_delegation(inode);
1864 }
1865
1866 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1867 {
1868 struct nfs4_state *state = opendata->state;
1869 struct nfs_delegation *delegation;
1870 int open_mode = opendata->o_arg.open_flags;
1871 fmode_t fmode = opendata->o_arg.fmode;
1872 enum open_claim_type4 claim = opendata->o_arg.claim;
1873 nfs4_stateid stateid;
1874 int ret = -EAGAIN;
1875
1876 for (;;) {
1877 spin_lock(&state->owner->so_lock);
1878 if (can_open_cached(state, fmode, open_mode, claim)) {
1879 update_open_stateflags(state, fmode);
1880 spin_unlock(&state->owner->so_lock);
1881 goto out_return_state;
1882 }
1883 spin_unlock(&state->owner->so_lock);
1884 rcu_read_lock();
1885 delegation = nfs4_get_valid_delegation(state->inode);
1886 if (!can_open_delegated(delegation, fmode, claim)) {
1887 rcu_read_unlock();
1888 break;
1889 }
1890 /* Save the delegation */
1891 nfs4_stateid_copy(&stateid, &delegation->stateid);
1892 rcu_read_unlock();
1893 nfs_release_seqid(opendata->o_arg.seqid);
1894 if (!opendata->is_recover) {
1895 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1896 if (ret != 0)
1897 goto out;
1898 }
1899 ret = -EAGAIN;
1900
1901 /* Try to update the stateid using the delegation */
1902 if (update_open_stateid(state, NULL, &stateid, fmode))
1903 goto out_return_state;
1904 }
1905 out:
1906 return ERR_PTR(ret);
1907 out_return_state:
1908 refcount_inc(&state->count);
1909 return state;
1910 }
1911
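/*
* Handle a delegation returned in the OPEN reply: record it as a new
* delegation, or reclaim the existing one during reboot recovery. A
* delegation handed out in reply to a CLAIM_DELEGATE_CUR open is a server
* bug and is ignored. If the server set do_recall, schedule an
* asynchronous return of the delegation.
*/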
1912 static void
1913 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1914 {
1915 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1916 struct nfs_delegation *delegation;
1917 int delegation_flags = 0;
1918
1919 rcu_read_lock();
1920 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1921 if (delegation)
1922 delegation_flags = delegation->flags;
1923 rcu_read_unlock();
1924 switch (data->o_arg.claim) {
1925 default:
1926 break;
1927 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1928 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1929 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1930 "returning a delegation for "
1931 "OPEN(CLAIM_DELEGATE_CUR)\n",
1932 clp->cl_hostname);
1933 return;
1934 }
1935 if ((delegation_flags & (1UL << NFS_DELEGATION_NEED_RECLAIM)) == 0)
1936 nfs_inode_set_delegation(state->inode,
1937 data->owner->so_cred,
1938 data->o_res.delegation_type,
1939 &data->o_res.delegation,
1940 data->o_res.pagemod_limit);
1941 else
1942 nfs_inode_reclaim_delegation(state->inode,
1943 data->owner->so_cred,
1944 data->o_res.delegation_type,
1945 &data->o_res.delegation,
1946 data->o_res.pagemod_limit);
1947
1948 if (data->o_res.do_recall)
1949 nfs_async_inode_return_delegation(state->inode,
1950 &data->o_res.delegation);
1951 }
1952
1953 /*
1954 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1955 * and update the nfs4_state.
1956 */
1957 static struct nfs4_state *
1958 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1959 {
1960 struct inode *inode = data->state->inode;
1961 struct nfs4_state *state = data->state;
1962 int ret;
1963
1964 if (!data->rpc_done) {
1965 if (data->rpc_status)
1966 return ERR_PTR(data->rpc_status);
1967 /* cached opens have already been processed */
1968 goto update;
1969 }
1970
1971 ret = nfs_refresh_inode(inode, &data->f_attr);
1972 if (ret)
1973 return ERR_PTR(ret);
1974
1975 if (data->o_res.delegation_type != 0)
1976 nfs4_opendata_check_deleg(data, state);
1977 update:
1978 if (!update_open_stateid(state, &data->o_res.stateid,
1979 NULL, data->o_arg.fmode))
1980 return ERR_PTR(-EAGAIN);
1981 refcount_inc(&state->count);
1982
1983 return state;
1984 }
1985
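/*
* Obtain the inode described by the OPEN reply. For claims by name a fresh
* inode is looked up from the returned file handle and attributes; for
* claims by file handle the dentry's existing inode is reused and its
* attributes are refreshed.
*/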
1986 static struct inode *
1987 nfs4_opendata_get_inode(struct nfs4_opendata *data)
1988 {
1989 struct inode *inode;
1990
1991 switch (data->o_arg.claim) {
1992 case NFS4_OPEN_CLAIM_NULL:
1993 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1994 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1995 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1996 return ERR_PTR(-EAGAIN);
1997 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
1998 &data->f_attr, data->f_label);
1999 break;
2000 default:
2001 inode = d_inode(data->dentry);
2002 ihold(inode);
2003 nfs_refresh_inode(inode, &data->f_attr);
2004 }
2005 return inode;
2006 }
2007
2008 static struct nfs4_state *
2009 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2010 {
2011 struct nfs4_state *state;
2012 struct inode *inode;
2013
2014 inode = nfs4_opendata_get_inode(data);
2015 if (IS_ERR(inode))
2016 return ERR_CAST(inode);
2017 if (data->state != NULL && data->state->inode == inode) {
2018 state = data->state;
2019 refcount_inc(&state->count);
2020 } else
2021 state = nfs4_get_open_state(inode, data->owner);
2022 iput(inode);
2023 if (state == NULL)
2024 state = ERR_PTR(-ENOMEM);
2025 return state;
2026 }
2027
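/*
* Turn a completed OPEN into a referenced nfs4_state, recording any
* delegation that was returned with it. If the RPC did not complete,
* fall back to attempting a cached open.
*/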
2028 static struct nfs4_state *
2029 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2030 {
2031 struct nfs4_state *state;
2032
2033 if (!data->rpc_done) {
2034 state = nfs4_try_open_cached(data);
2035 trace_nfs4_cached_open(data->state);
2036 goto out;
2037 }
2038
2039 state = nfs4_opendata_find_nfs4_state(data);
2040 if (IS_ERR(state))
2041 goto out;
2042
2043 if (data->o_res.delegation_type != 0)
2044 nfs4_opendata_check_deleg(data, state);
2045 if (!update_open_stateid(state, &data->o_res.stateid,
2046 NULL, data->o_arg.fmode)) {
2047 nfs4_put_open_state(state);
2048 state = ERR_PTR(-EAGAIN);
2049 }
2050 out:
2051 nfs_release_seqid(data->o_arg.seqid);
2052 return state;
2053 }
2054
2055 static struct nfs4_state *
2056 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2057 {
2058 struct nfs4_state *ret;
2059
2060 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2061 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2062 else
2063 ret = _nfs4_opendata_to_nfs4_state(data);
2064 nfs4_sequence_free_slot(&data->o_res.seq_res);
2065 return ret;
2066 }
2067
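/*
* Find and reference an open context attached to @state whose open mode
* covers @mode. Returns ERR_PTR(-ENOENT) if no such context exists.
*/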
2068 static struct nfs_open_context *
2069 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2070 {
2071 struct nfs_inode *nfsi = NFS_I(state->inode);
2072 struct nfs_open_context *ctx;
2073
2074 rcu_read_lock();
2075 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2076 if (ctx->state != state)
2077 continue;
2078 if ((ctx->mode & mode) != mode)
2079 continue;
2080 if (!get_nfs_open_context(ctx))
2081 continue;
2082 rcu_read_unlock();
2083 return ctx;
2084 }
2085 rcu_read_unlock();
2086 return ERR_PTR(-ENOENT);
2087 }
2088
2089 static struct nfs_open_context *
2090 nfs4_state_find_open_context(struct nfs4_state *state)
2091 {
2092 struct nfs_open_context *ctx;
2093
2094 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2095 if (!IS_ERR(ctx))
2096 return ctx;
2097 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2098 if (!IS_ERR(ctx))
2099 return ctx;
2100 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2101 }
2102
2103 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2104 struct nfs4_state *state, enum open_claim_type4 claim)
2105 {
2106 struct nfs4_opendata *opendata;
2107
2108 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2109 NULL, claim, GFP_NOFS);
2110 if (opendata == NULL)
2111 return ERR_PTR(-ENOMEM);
2112 opendata->state = state;
2113 refcount_inc(&state->count);
2114 return opendata;
2115 }
2116
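/*
* Recover the open state for a single share mode by replaying the OPEN,
* then verify that the server handed back the state we already hold;
* a mismatch is reported as -ESTALE.
*/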
2117 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2118 fmode_t fmode)
2119 {
2120 struct nfs4_state *newstate;
2121 int ret;
2122
2123 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2124 return 0;
2125 opendata->o_arg.open_flags = 0;
2126 opendata->o_arg.fmode = fmode;
2127 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
2128 NFS_SB(opendata->dentry->d_sb),
2129 fmode, 0);
2130 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2131 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2132 nfs4_init_opendata_res(opendata);
2133 ret = _nfs4_recover_proc_open(opendata);
2134 if (ret != 0)
2135 return ret;
2136 newstate = nfs4_opendata_to_nfs4_state(opendata);
2137 if (IS_ERR(newstate))
2138 return PTR_ERR(newstate);
2139 if (newstate != opendata->state)
2140 ret = -ESTALE;
2141 nfs4_close_state(newstate, fmode);
2142 return ret;
2143 }
2144
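/*
* Recover every share mode currently in use (read/write, write-only and
* read-only) and, if no delegation is in effect, resync state->stateid
* with the recovered open stateid.
*/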
2145 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2146 {
2147 int ret;
2148
2149 /* memory barrier prior to reading state->n_* */
2150 smp_rmb();
2151 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2152 if (ret != 0)
2153 return ret;
2154 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2155 if (ret != 0)
2156 return ret;
2157 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2158 if (ret != 0)
2159 return ret;
2160 /*
2161 * We may have performed cached opens for all three recoveries.
2162 * Check if we need to update the current stateid.
2163 */
2164 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2165 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2166 write_seqlock(&state->seqlock);
2167 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2168 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2169 write_sequnlock(&state->seqlock);
2170 }
2171 return 0;
2172 }
2173
2174 /*
2175 * OPEN_RECLAIM:
2176 * reclaim state on the server after a reboot.
2177 */
2178 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2179 {
2180 struct nfs_delegation *delegation;
2181 struct nfs4_opendata *opendata;
2182 fmode_t delegation_type = 0;
2183 int status;
2184
2185 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2186 NFS4_OPEN_CLAIM_PREVIOUS);
2187 if (IS_ERR(opendata))
2188 return PTR_ERR(opendata);
2189 rcu_read_lock();
2190 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2191 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2192 delegation_type = delegation->type;
2193 rcu_read_unlock();
2194 opendata->o_arg.u.delegation_type = delegation_type;
2195 status = nfs4_open_recover(opendata, state);
2196 nfs4_opendata_put(opendata);
2197 return status;
2198 }
2199
2200 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2201 {
2202 struct nfs_server *server = NFS_SERVER(state->inode);
2203 struct nfs4_exception exception = { };
2204 int err;
2205 do {
2206 err = _nfs4_do_open_reclaim(ctx, state);
2207 trace_nfs4_open_reclaim(ctx, 0, err);
2208 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2209 continue;
2210 if (err != -NFS4ERR_DELAY)
2211 break;
2212 nfs4_handle_exception(server, err, &exception);
2213 } while (exception.retry);
2214 return err;
2215 }
2216
2217 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2218 {
2219 struct nfs_open_context *ctx;
2220 int ret;
2221
2222 ctx = nfs4_state_find_open_context(state);
2223 if (IS_ERR(ctx))
2224 return -EAGAIN;
2225 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2226 nfs_state_clear_open_state_flags(state);
2227 ret = nfs4_do_open_reclaim(ctx, state);
2228 put_nfs_open_context(ctx);
2229 return ret;
2230 }
2231
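/*
* Map errors hit while recalling a delegation onto a recovery action:
* -EAGAIN tells the caller to retry once the scheduled recovery has run,
* 0 means the error was absorbed (e.g. a lost lock), and anything else is
* returned unchanged.
*/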
2232 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2233 {
2234 switch (err) {
2235 default:
2236 printk(KERN_ERR "NFS: %s: unhandled error %d.\n", __func__, err);
2237 fallthrough;
2238 case 0:
2239 case -ENOENT:
2240 case -EAGAIN:
2241 case -ESTALE:
2242 case -ETIMEDOUT:
2243 break;
2244 case -NFS4ERR_BADSESSION:
2245 case -NFS4ERR_BADSLOT:
2246 case -NFS4ERR_BAD_HIGH_SLOT:
2247 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2248 case -NFS4ERR_DEADSESSION:
2249 return -EAGAIN;
2250 case -NFS4ERR_STALE_CLIENTID:
2251 case -NFS4ERR_STALE_STATEID:
2252 /* Don't recall a delegation if it was lost */
2253 nfs4_schedule_lease_recovery(server->nfs_client);
2254 return -EAGAIN;
2255 case -NFS4ERR_MOVED:
2256 nfs4_schedule_migration_recovery(server);
2257 return -EAGAIN;
2258 case -NFS4ERR_LEASE_MOVED:
2259 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2260 return -EAGAIN;
2261 case -NFS4ERR_DELEG_REVOKED:
2262 case -NFS4ERR_ADMIN_REVOKED:
2263 case -NFS4ERR_EXPIRED:
2264 case -NFS4ERR_BAD_STATEID:
2265 case -NFS4ERR_OPENMODE:
2266 nfs_inode_find_state_and_recover(state->inode,
2267 stateid);
2268 nfs4_schedule_stateid_recovery(server, state);
2269 return -EAGAIN;
2270 case -NFS4ERR_DELAY:
2271 case -NFS4ERR_GRACE:
2272 ssleep(1);
2273 return -EAGAIN;
2274 case -ENOMEM:
2275 case -NFS4ERR_DENIED:
2276 if (fl) {
2277 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2278 if (lsp)
2279 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2280 }
2281 return 0;
2282 }
2283 return err;
2284 }
2285
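/*
* Called when a delegation is being recalled: re-open the file against the
* server for every share mode not already covered by the open stateid,
* then drop the delegation from the nfs4_state.
*/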
2286 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2287 struct nfs4_state *state, const nfs4_stateid *stateid)
2288 {
2289 struct nfs_server *server = NFS_SERVER(state->inode);
2290 struct nfs4_opendata *opendata;
2291 int err = 0;
2292
2293 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2294 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2295 if (IS_ERR(opendata))
2296 return PTR_ERR(opendata);
2297 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2298 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2299 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2300 if (err)
2301 goto out;
2302 }
2303 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2304 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2305 if (err)
2306 goto out;
2307 }
2308 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2309 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2310 if (err)
2311 goto out;
2312 }
2313 nfs_state_clear_delegation(state);
2314 out:
2315 nfs4_opendata_put(opendata);
2316 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2317 }
2318
2319 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2320 {
2321 struct nfs4_opendata *data = calldata;
2322
2323 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2324 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2325 }
2326
2327 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2328 {
2329 struct nfs4_opendata *data = calldata;
2330
2331 nfs40_sequence_done(task, &data->c_res.seq_res);
2332
2333 data->rpc_status = task->tk_status;
2334 if (data->rpc_status == 0) {
2335 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2336 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2337 renew_lease(data->o_res.server, data->timestamp);
2338 data->rpc_done = true;
2339 }
2340 }
2341
2342 static void nfs4_open_confirm_release(void *calldata)
2343 {
2344 struct nfs4_opendata *data = calldata;
2345 struct nfs4_state *state = NULL;
2346
2347 /* If this request hasn't been cancelled, do nothing */
2348 if (!data->cancelled)
2349 goto out_free;
2350 /* In case of error, no cleanup! */
2351 if (!data->rpc_done)
2352 goto out_free;
2353 state = nfs4_opendata_to_nfs4_state(data);
2354 if (!IS_ERR(state))
2355 nfs4_close_state(state, data->o_arg.fmode);
2356 out_free:
2357 nfs4_opendata_put(data);
2358 }
2359
2360 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2361 .rpc_call_prepare = nfs4_open_confirm_prepare,
2362 .rpc_call_done = nfs4_open_confirm_done,
2363 .rpc_release = nfs4_open_confirm_release,
2364 };
2365
2366 /*
2367 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2368 */
2369 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2370 {
2371 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2372 struct rpc_task *task;
2373 struct rpc_message msg = {
2374 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2375 .rpc_argp = &data->c_arg,
2376 .rpc_resp = &data->c_res,
2377 .rpc_cred = data->owner->so_cred,
2378 };
2379 struct rpc_task_setup task_setup_data = {
2380 .rpc_client = server->client,
2381 .rpc_message = &msg,
2382 .callback_ops = &nfs4_open_confirm_ops,
2383 .callback_data = data,
2384 .workqueue = nfsiod_workqueue,
2385 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2386 };
2387 int status;
2388
2389 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2390 data->is_recover);
2391 kref_get(&data->kref);
2392 data->rpc_done = false;
2393 data->rpc_status = 0;
2394 data->timestamp = jiffies;
2395 task = rpc_run_task(&task_setup_data);
2396 if (IS_ERR(task))
2397 return PTR_ERR(task);
2398 status = rpc_wait_for_completion_task(task);
2399 if (status != 0) {
2400 data->cancelled = true;
2401 smp_wmb();
2402 } else
2403 status = data->rpc_status;
2404 rpc_put_task(task);
2405 return status;
2406 }
2407
2408 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2409 {
2410 struct nfs4_opendata *data = calldata;
2411 struct nfs4_state_owner *sp = data->owner;
2412 struct nfs_client *clp = sp->so_server->nfs_client;
2413 enum open_claim_type4 claim = data->o_arg.claim;
2414
2415 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2416 goto out_wait;
2417 /*
2418 * Check if we still need to send an OPEN call, or if we can use
2419 * a delegation instead.
2420 */
2421 if (data->state != NULL) {
2422 struct nfs_delegation *delegation;
2423
2424 if (can_open_cached(data->state, data->o_arg.fmode,
2425 data->o_arg.open_flags, claim))
2426 goto out_no_action;
2427 rcu_read_lock();
2428 delegation = nfs4_get_valid_delegation(data->state->inode);
2429 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2430 goto unlock_no_action;
2431 rcu_read_unlock();
2432 }
2433 /* Update client id. */
2434 data->o_arg.clientid = clp->cl_clientid;
2435 switch (claim) {
2436 default:
2437 break;
2438 case NFS4_OPEN_CLAIM_PREVIOUS:
2439 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2440 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2441 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2442 fallthrough;
2443 case NFS4_OPEN_CLAIM_FH:
2444 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2445 }
2446 data->timestamp = jiffies;
2447 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2448 &data->o_arg.seq_args,
2449 &data->o_res.seq_res,
2450 task) != 0)
2451 nfs_release_seqid(data->o_arg.seqid);
2452
2453 /* Set the create mode (note dependency on the session type) */
2454 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2455 if (data->o_arg.open_flags & O_EXCL) {
2456 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2457 if (nfs4_has_persistent_session(clp))
2458 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2459 else if (clp->cl_mvops->minor_version > 0)
2460 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2461 }
2462 return;
2463 unlock_no_action:
2464 trace_nfs4_cached_open(data->state);
2465 rcu_read_unlock();
2466 out_no_action:
2467 task->tk_action = NULL;
2468 out_wait:
2469 nfs4_sequence_done(task, &data->o_res.seq_res);
2470 }
2471
2472 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2473 {
2474 struct nfs4_opendata *data = calldata;
2475
2476 data->rpc_status = task->tk_status;
2477
2478 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2479 return;
2480
2481 if (task->tk_status == 0) {
2482 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2483 switch (data->o_res.f_attr->mode & S_IFMT) {
2484 case S_IFREG:
2485 break;
2486 case S_IFLNK:
2487 data->rpc_status = -ELOOP;
2488 break;
2489 case S_IFDIR:
2490 data->rpc_status = -EISDIR;
2491 break;
2492 default:
2493 data->rpc_status = -ENOTDIR;
2494 }
2495 }
2496 renew_lease(data->o_res.server, data->timestamp);
2497 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2498 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2499 }
2500 data->rpc_done = true;
2501 }
2502
2503 static void nfs4_open_release(void *calldata)
2504 {
2505 struct nfs4_opendata *data = calldata;
2506 struct nfs4_state *state = NULL;
2507
2508 /* If this request hasn't been cancelled, do nothing */
2509 if (!data->cancelled)
2510 goto out_free;
2511 /* In case of error, no cleanup! */
2512 if (data->rpc_status != 0 || !data->rpc_done)
2513 goto out_free;
2514 /* In case we need an open_confirm, no cleanup! */
2515 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2516 goto out_free;
2517 state = nfs4_opendata_to_nfs4_state(data);
2518 if (!IS_ERR(state))
2519 nfs4_close_state(state, data->o_arg.fmode);
2520 out_free:
2521 nfs4_opendata_put(data);
2522 }
2523
2524 static const struct rpc_call_ops nfs4_open_ops = {
2525 .rpc_call_prepare = nfs4_open_prepare,
2526 .rpc_call_done = nfs4_open_done,
2527 .rpc_release = nfs4_open_release,
2528 };
2529
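/*
* Set up and run the asynchronous OPEN RPC and wait for it to complete.
* A NULL @ctx marks a state recovery open: it is sent with RPC_TASK_TIMEOUT
* and skips the pNFS LAYOUTGET preparation done for normal opens.
*/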
2530 static int nfs4_run_open_task(struct nfs4_opendata *data,
2531 struct nfs_open_context *ctx)
2532 {
2533 struct inode *dir = d_inode(data->dir);
2534 struct nfs_server *server = NFS_SERVER(dir);
2535 struct nfs_openargs *o_arg = &data->o_arg;
2536 struct nfs_openres *o_res = &data->o_res;
2537 struct rpc_task *task;
2538 struct rpc_message msg = {
2539 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2540 .rpc_argp = o_arg,
2541 .rpc_resp = o_res,
2542 .rpc_cred = data->owner->so_cred,
2543 };
2544 struct rpc_task_setup task_setup_data = {
2545 .rpc_client = server->client,
2546 .rpc_message = &msg,
2547 .callback_ops = &nfs4_open_ops,
2548 .callback_data = data,
2549 .workqueue = nfsiod_workqueue,
2550 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2551 };
2552 int status;
2553
2554 kref_get(&data->kref);
2555 data->rpc_done = false;
2556 data->rpc_status = 0;
2557 data->cancelled = false;
2558 data->is_recover = false;
2559 if (!ctx) {
2560 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2561 data->is_recover = true;
2562 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2563 } else {
2564 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2565 pnfs_lgopen_prepare(data, ctx);
2566 }
2567 task = rpc_run_task(&task_setup_data);
2568 if (IS_ERR(task))
2569 return PTR_ERR(task);
2570 status = rpc_wait_for_completion_task(task);
2571 if (status != 0) {
2572 data->cancelled = true;
2573 smp_wmb();
2574 } else
2575 status = data->rpc_status;
2576 rpc_put_task(task);
2577
2578 return status;
2579 }
2580
2581 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2582 {
2583 struct inode *dir = d_inode(data->dir);
2584 struct nfs_openres *o_res = &data->o_res;
2585 int status;
2586
2587 status = nfs4_run_open_task(data, NULL);
2588 if (status != 0 || !data->rpc_done)
2589 return status;
2590
2591 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2592
2593 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2594 status = _nfs4_proc_open_confirm(data);
2595
2596 return status;
2597 }
2598
2599 /*
2600 * Additional permission checks in order to distinguish between an
2601 * open for read, and an open for execute. This works around the
2602 * fact that NFSv4 OPEN treats read and execute permissions as being
2603 * the same.
2604 * Note that in the non-execute case, we want to turn off permission
2605 * checking if we just created a new file (POSIX open() semantics).
2606 */
2607 static int nfs4_opendata_access(const struct cred *cred,
2608 struct nfs4_opendata *opendata,
2609 struct nfs4_state *state, fmode_t fmode,
2610 int openflags)
2611 {
2612 struct nfs_access_entry cache;
2613 u32 mask, flags;
2614
2615 /* access call failed or for some reason the server doesn't
2616 * support any access modes -- defer access call until later */
2617 if (opendata->o_res.access_supported == 0)
2618 return 0;
2619
2620 mask = 0;
2621 /*
2622 * Use openflags to check for exec, because fmode won't
2623 * always have FMODE_EXEC set when the file is opened for execute.
2624 */
2625 if (openflags & __FMODE_EXEC) {
2626 /* ONLY check for exec rights */
2627 if (S_ISDIR(state->inode->i_mode))
2628 mask = NFS4_ACCESS_LOOKUP;
2629 else
2630 mask = NFS4_ACCESS_EXECUTE;
2631 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2632 mask = NFS4_ACCESS_READ;
2633
2634 cache.cred = cred;
2635 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2636 nfs_access_add_cache(state->inode, &cache);
2637
2638 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2639 if ((mask & ~cache.mask & flags) == 0)
2640 return 0;
2641
2642 return -EACCES;
2643 }
2644
2645 /*
2646 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2647 */
2648 static int _nfs4_proc_open(struct nfs4_opendata *data,
2649 struct nfs_open_context *ctx)
2650 {
2651 struct inode *dir = d_inode(data->dir);
2652 struct nfs_server *server = NFS_SERVER(dir);
2653 struct nfs_openargs *o_arg = &data->o_arg;
2654 struct nfs_openres *o_res = &data->o_res;
2655 int status;
2656
2657 status = nfs4_run_open_task(data, ctx);
2658 if (!data->rpc_done)
2659 return status;
2660 if (status != 0) {
2661 if (status == -NFS4ERR_BADNAME &&
2662 !(o_arg->open_flags & O_CREAT))
2663 return -ENOENT;
2664 return status;
2665 }
2666
2667 nfs_fattr_map_and_free_names(server, &data->f_attr);
2668
2669 if (o_arg->open_flags & O_CREAT) {
2670 if (o_arg->open_flags & O_EXCL)
2671 data->file_created = true;
2672 else if (o_res->cinfo.before != o_res->cinfo.after)
2673 data->file_created = true;
2674 if (data->file_created ||
2675 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2676 nfs4_update_changeattr(dir, &o_res->cinfo,
2677 o_res->f_attr->time_start,
2678 NFS_INO_INVALID_DATA);
2679 }
2680 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2681 server->caps &= ~NFS_CAP_POSIX_LOCK;
2682 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2683 status = _nfs4_proc_open_confirm(data);
2684 if (status != 0)
2685 return status;
2686 }
2687 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2688 nfs4_sequence_free_slot(&o_res->seq_res);
2689 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr,
2690 o_res->f_label, NULL);
2691 }
2692 return 0;
2693 }
2694
2695 /*
2696 * OPEN_EXPIRED:
2697 * reclaim state on the server after a network partition.
2698 * Assumes caller holds the appropriate lock
2699 */
2700 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2701 {
2702 struct nfs4_opendata *opendata;
2703 int ret;
2704
2705 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2706 NFS4_OPEN_CLAIM_FH);
2707 if (IS_ERR(opendata))
2708 return PTR_ERR(opendata);
2709 ret = nfs4_open_recover(opendata, state);
2710 if (ret == -ESTALE)
2711 d_drop(ctx->dentry);
2712 nfs4_opendata_put(opendata);
2713 return ret;
2714 }
2715
2716 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2717 {
2718 struct nfs_server *server = NFS_SERVER(state->inode);
2719 struct nfs4_exception exception = { };
2720 int err;
2721
2722 do {
2723 err = _nfs4_open_expired(ctx, state);
2724 trace_nfs4_open_expired(ctx, 0, err);
2725 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2726 continue;
2727 switch (err) {
2728 default:
2729 goto out;
2730 case -NFS4ERR_GRACE:
2731 case -NFS4ERR_DELAY:
2732 nfs4_handle_exception(server, err, &exception);
2733 err = 0;
2734 }
2735 } while (exception.retry);
2736 out:
2737 return err;
2738 }
2739
2740 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2741 {
2742 struct nfs_open_context *ctx;
2743 int ret;
2744
2745 ctx = nfs4_state_find_open_context(state);
2746 if (IS_ERR(ctx))
2747 return -EAGAIN;
2748 ret = nfs4_do_open_expired(ctx, state);
2749 put_nfs_open_context(ctx);
2750 return ret;
2751 }
2752
2753 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2754 const nfs4_stateid *stateid)
2755 {
2756 nfs_remove_bad_delegation(state->inode, stateid);
2757 nfs_state_clear_delegation(state);
2758 }
2759
2760 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2761 {
2762 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2763 nfs_finish_clear_delegation_stateid(state, NULL);
2764 }
2765
2766 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2767 {
2768 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2769 nfs40_clear_delegation_stateid(state);
2770 nfs_state_clear_open_state_flags(state);
2771 return nfs4_open_expired(sp, state);
2772 }
2773
2774 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2775 nfs4_stateid *stateid,
2776 const struct cred *cred)
2777 {
2778 return -NFS4ERR_BAD_STATEID;
2779 }
2780
2781 #if defined(CONFIG_NFS_V4_1)
2782 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2783 nfs4_stateid *stateid,
2784 const struct cred *cred)
2785 {
2786 int status;
2787
2788 switch (stateid->type) {
2789 default:
2790 break;
2791 case NFS4_INVALID_STATEID_TYPE:
2792 case NFS4_SPECIAL_STATEID_TYPE:
2793 return -NFS4ERR_BAD_STATEID;
2794 case NFS4_REVOKED_STATEID_TYPE:
2795 goto out_free;
2796 }
2797
2798 status = nfs41_test_stateid(server, stateid, cred);
2799 switch (status) {
2800 case -NFS4ERR_EXPIRED:
2801 case -NFS4ERR_ADMIN_REVOKED:
2802 case -NFS4ERR_DELEG_REVOKED:
2803 break;
2804 default:
2805 return status;
2806 }
2807 out_free:
2808 /* Ack the revoked state to the server */
2809 nfs41_free_stateid(server, stateid, cred, true);
2810 return -NFS4ERR_EXPIRED;
2811 }
2812
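/*
* Check whether the delegation covering @state has expired or been revoked
* on the server and, if so, clear it locally. Only delegations flagged
* NFS_DELEGATION_TEST_EXPIRED are actually tested.
*/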
2813 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2814 {
2815 struct nfs_server *server = NFS_SERVER(state->inode);
2816 nfs4_stateid stateid;
2817 struct nfs_delegation *delegation;
2818 const struct cred *cred = NULL;
2819 int status, ret = NFS_OK;
2820
2821 /* Get the delegation credential for use by test/free_stateid */
2822 rcu_read_lock();
2823 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2824 if (delegation == NULL) {
2825 rcu_read_unlock();
2826 nfs_state_clear_delegation(state);
2827 return NFS_OK;
2828 }
2829
2830 spin_lock(&delegation->lock);
2831 nfs4_stateid_copy(&stateid, &delegation->stateid);
2832
2833 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2834 &delegation->flags)) {
2835 spin_unlock(&delegation->lock);
2836 rcu_read_unlock();
2837 return NFS_OK;
2838 }
2839
2840 if (delegation->cred)
2841 cred = get_cred(delegation->cred);
2842 spin_unlock(&delegation->lock);
2843 rcu_read_unlock();
2844 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2845 trace_nfs4_test_delegation_stateid(state, NULL, status);
2846 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2847 nfs_finish_clear_delegation_stateid(state, &stateid);
2848 else
2849 ret = status;
2850
2851 put_cred(cred);
2852 return ret;
2853 }
2854
2855 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2856 {
2857 nfs4_stateid tmp;
2858
2859 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2860 nfs4_copy_delegation_stateid(state->inode, state->state,
2861 &tmp, NULL) &&
2862 nfs4_stateid_match_other(&state->stateid, &tmp))
2863 nfs_state_set_delegation(state, &tmp, state->state);
2864 else
2865 nfs_state_clear_delegation(state);
2866 }
2867
2868 /**
2869 * nfs41_check_expired_locks - possibly free a lock stateid
2870 *
2871 * @state: NFSv4 state for an inode
2872 *
2873 * Returns NFS_OK if recovery for this stateid is now finished.
2874 * Otherwise a negative NFS4ERR value is returned.
2875 */
2876 static int nfs41_check_expired_locks(struct nfs4_state *state)
2877 {
2878 int status, ret = NFS_OK;
2879 struct nfs4_lock_state *lsp, *prev = NULL;
2880 struct nfs_server *server = NFS_SERVER(state->inode);
2881
2882 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2883 goto out;
2884
2885 spin_lock(&state->state_lock);
2886 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2887 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2888 const struct cred *cred = lsp->ls_state->owner->so_cred;
2889
2890 refcount_inc(&lsp->ls_count);
2891 spin_unlock(&state->state_lock);
2892
2893 nfs4_put_lock_state(prev);
2894 prev = lsp;
2895
2896 status = nfs41_test_and_free_expired_stateid(server,
2897 &lsp->ls_stateid,
2898 cred);
2899 trace_nfs4_test_lock_stateid(state, lsp, status);
2900 if (status == -NFS4ERR_EXPIRED ||
2901 status == -NFS4ERR_BAD_STATEID) {
2902 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2903 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2904 if (!recover_lost_locks)
2905 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2906 } else if (status != NFS_OK) {
2907 ret = status;
2908 nfs4_put_lock_state(prev);
2909 goto out;
2910 }
2911 spin_lock(&state->state_lock);
2912 }
2913 }
2914 spin_unlock(&state->state_lock);
2915 nfs4_put_lock_state(prev);
2916 out:
2917 return ret;
2918 }
2919
2920 /**
2921 * nfs41_check_open_stateid - possibly free an open stateid
2922 *
2923 * @state: NFSv4 state for an inode
2924 *
2925 * Returns NFS_OK if recovery for this stateid is now finished.
2926 * Otherwise a negative NFS4ERR value is returned.
2927 */
2928 static int nfs41_check_open_stateid(struct nfs4_state *state)
2929 {
2930 struct nfs_server *server = NFS_SERVER(state->inode);
2931 nfs4_stateid *stateid = &state->open_stateid;
2932 const struct cred *cred = state->owner->so_cred;
2933 int status;
2934
2935 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2936 return -NFS4ERR_BAD_STATEID;
2937 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2938 trace_nfs4_test_open_stateid(state, NULL, status);
2939 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2940 nfs_state_clear_open_state_flags(state);
2941 stateid->type = NFS4_INVALID_STATEID_TYPE;
2942 return status;
2943 }
2944 if (nfs_open_stateid_recover_openmode(state))
2945 return -NFS4ERR_OPENMODE;
2946 return NFS_OK;
2947 }
2948
2949 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2950 {
2951 int status;
2952
2953 status = nfs41_check_delegation_stateid(state);
2954 if (status != NFS_OK)
2955 return status;
2956 nfs41_delegation_recover_stateid(state);
2957
2958 status = nfs41_check_expired_locks(state);
2959 if (status != NFS_OK)
2960 return status;
2961 status = nfs41_check_open_stateid(state);
2962 if (status != NFS_OK)
2963 status = nfs4_open_expired(sp, state);
2964 return status;
2965 }
2966 #endif
2967
2968 /*
2969 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2970 * fields corresponding to attributes that were used to store the verifier.
2971 * Make sure we clobber those fields in the later setattr call.
2972 */
2973 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2974 struct iattr *sattr, struct nfs4_label **label)
2975 {
2976 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
2977 __u32 attrset[3];
2978 unsigned ret;
2979 unsigned i;
2980
2981 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
2982 attrset[i] = opendata->o_res.attrset[i];
2983 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
2984 attrset[i] &= ~bitmask[i];
2985 }
2986
2987 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
2988 sattr->ia_valid : 0;
2989
2990 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
2991 if (sattr->ia_valid & ATTR_ATIME_SET)
2992 ret |= ATTR_ATIME_SET;
2993 else
2994 ret |= ATTR_ATIME;
2995 }
2996
2997 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
2998 if (sattr->ia_valid & ATTR_MTIME_SET)
2999 ret |= ATTR_MTIME_SET;
3000 else
3001 ret |= ATTR_MTIME;
3002 }
3003
3004 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3005 *label = NULL;
3006 return ret;
3007 }
3008
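/*
* Send the OPEN (plus OPEN_CONFIRM where required), turn the reply into an
* nfs4_state attached to @ctx, splice an inode into a negative dentry when
* necessary, and run the deferred access check.
*/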
3009 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3010 int flags, struct nfs_open_context *ctx)
3011 {
3012 struct nfs4_state_owner *sp = opendata->owner;
3013 struct nfs_server *server = sp->so_server;
3014 struct dentry *dentry;
3015 struct nfs4_state *state;
3016 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3017 struct inode *dir = d_inode(opendata->dir);
3018 unsigned long dir_verifier;
3019 unsigned int seq;
3020 int ret;
3021
3022 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3023 dir_verifier = nfs_save_change_attribute(dir);
3024
3025 ret = _nfs4_proc_open(opendata, ctx);
3026 if (ret != 0)
3027 goto out;
3028
3029 state = _nfs4_opendata_to_nfs4_state(opendata);
3030 ret = PTR_ERR(state);
3031 if (IS_ERR(state))
3032 goto out;
3033 ctx->state = state;
3034 if (server->caps & NFS_CAP_POSIX_LOCK)
3035 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3036 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3037 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3038
3039 dentry = opendata->dentry;
3040 if (d_really_is_negative(dentry)) {
3041 struct dentry *alias;
3042 d_drop(dentry);
3043 alias = d_exact_alias(dentry, state->inode);
3044 if (!alias)
3045 alias = d_splice_alias(igrab(state->inode), dentry);
3046 /* d_splice_alias() can't fail here - it's a non-directory */
3047 if (alias) {
3048 dput(ctx->dentry);
3049 ctx->dentry = dentry = alias;
3050 }
3051 }
3052
3053 switch (opendata->o_arg.claim) {
3054 default:
3055 break;
3056 case NFS4_OPEN_CLAIM_NULL:
3057 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3058 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3059 if (!opendata->rpc_done)
3060 break;
3061 if (opendata->o_res.delegation_type != 0)
3062 dir_verifier = nfs_save_change_attribute(dir);
3063 nfs_set_verifier(dentry, dir_verifier);
3064 }
3065
3066 /* Parse layoutget results before we check for access */
3067 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3068
3069 ret = nfs4_opendata_access(sp->so_cred, opendata, state,
3070 acc_mode, flags);
3071 if (ret != 0)
3072 goto out;
3073
3074 if (d_inode(dentry) == state->inode) {
3075 nfs_inode_attach_open_context(ctx);
3076 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3077 nfs4_schedule_stateid_recovery(server, state);
3078 }
3079
3080 out:
3081 if (!opendata->cancelled)
3082 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3083 return ret;
3084 }
3085
3086 /*
3087 * Returns a referenced nfs4_state
3088 */
3089 static int _nfs4_do_open(struct inode *dir,
3090 struct nfs_open_context *ctx,
3091 int flags,
3092 const struct nfs4_open_createattrs *c,
3093 int *opened)
3094 {
3095 struct nfs4_state_owner *sp;
3096 struct nfs4_state *state = NULL;
3097 struct nfs_server *server = NFS_SERVER(dir);
3098 struct nfs4_opendata *opendata;
3099 struct dentry *dentry = ctx->dentry;
3100 const struct cred *cred = ctx->cred;
3101 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3102 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3103 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3104 struct iattr *sattr = c->sattr;
3105 struct nfs4_label *label = c->label;
3106 struct nfs4_label *olabel = NULL;
3107 int status;
3108
3109 /* Protect against reboot recovery conflicts */
3110 status = -ENOMEM;
3111 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3112 if (sp == NULL) {
3113 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3114 goto out_err;
3115 }
3116 status = nfs4_client_recover_expired_lease(server->nfs_client);
3117 if (status != 0)
3118 goto err_put_state_owner;
3119 if (d_really_is_positive(dentry))
3120 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3121 status = -ENOMEM;
3122 if (d_really_is_positive(dentry))
3123 claim = NFS4_OPEN_CLAIM_FH;
3124 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3125 c, claim, GFP_KERNEL);
3126 if (opendata == NULL)
3127 goto err_put_state_owner;
3128
3129 if (label) {
3130 olabel = nfs4_label_alloc(server, GFP_KERNEL);
3131 if (IS_ERR(olabel)) {
3132 status = PTR_ERR(olabel);
3133 goto err_opendata_put;
3134 }
3135 }
3136
3137 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3138 if (!opendata->f_attr.mdsthreshold) {
3139 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3140 if (!opendata->f_attr.mdsthreshold)
3141 goto err_free_label;
3142 }
3143 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3144 }
3145 if (d_really_is_positive(dentry))
3146 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3147
3148 status = _nfs4_open_and_get_state(opendata, flags, ctx);
3149 if (status != 0)
3150 goto err_free_label;
3151 state = ctx->state;
3152
3153 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3154 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3155 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3156 /*
3157 * Send any create attributes that were not set by the open
3158 * call with an extra setattr.
3159 */
3160 if (attrs || label) {
3161 unsigned ia_old = sattr->ia_valid;
3162
3163 sattr->ia_valid = attrs;
3164 nfs_fattr_init(opendata->o_res.f_attr);
3165 status = nfs4_do_setattr(state->inode, cred,
3166 opendata->o_res.f_attr, sattr,
3167 ctx, label, olabel);
3168 if (status == 0) {
3169 nfs_setattr_update_inode(state->inode, sattr,
3170 opendata->o_res.f_attr);
3171 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
3172 }
3173 sattr->ia_valid = ia_old;
3174 }
3175 }
3176 if (opened && opendata->file_created)
3177 *opened = 1;
3178
3179 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3180 *ctx_th = opendata->f_attr.mdsthreshold;
3181 opendata->f_attr.mdsthreshold = NULL;
3182 }
3183
3184 nfs4_label_free(olabel);
3185
3186 nfs4_opendata_put(opendata);
3187 nfs4_put_state_owner(sp);
3188 return 0;
3189 err_free_label:
3190 nfs4_label_free(olabel);
3191 err_opendata_put:
3192 nfs4_opendata_put(opendata);
3193 err_put_state_owner:
3194 nfs4_put_state_owner(sp);
3195 out_err:
3196 return status;
3197 }
3198
3199
3200 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3201 struct nfs_open_context *ctx,
3202 int flags,
3203 struct iattr *sattr,
3204 struct nfs4_label *label,
3205 int *opened)
3206 {
3207 struct nfs_server *server = NFS_SERVER(dir);
3208 struct nfs4_exception exception = {
3209 .interruptible = true,
3210 };
3211 struct nfs4_state *res;
3212 struct nfs4_open_createattrs c = {
3213 .label = label,
3214 .sattr = sattr,
3215 .verf = {
3216 [0] = (__u32)jiffies,
3217 [1] = (__u32)current->pid,
3218 },
3219 };
3220 int status;
3221
3222 do {
3223 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3224 res = ctx->state;
3225 trace_nfs4_open_file(ctx, flags, status);
3226 if (status == 0)
3227 break;
3228 /* NOTE: BAD_SEQID means the server and client disagree about the
3229 * book-keeping w.r.t. state-changing operations
3230 * (OPEN/CLOSE/LOCK/LOCKU...)
3231 * It is actually a sign of a bug on the client or on the server.
3232 *
3233 * If we receive a BAD_SEQID error in the particular case of
3234 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3235 * have unhashed the old state_owner for us, and that we can
3236 * therefore safely retry using a new one. We should still warn
3237 * the user though...
3238 */
3239 if (status == -NFS4ERR_BAD_SEQID) {
3240 pr_warn_ratelimited("NFS: v4 server %s "
3241 "returned a bad sequence-id error!\n",
3242 NFS_SERVER(dir)->nfs_client->cl_hostname);
3243 exception.retry = 1;
3244 continue;
3245 }
3246 /*
3247 * BAD_STATEID on OPEN means that the server cancelled our
3248 * state before it received the OPEN_CONFIRM.
3249 * Recover by retrying the request as per the discussion
3250 * on Page 181 of RFC3530.
3251 */
3252 if (status == -NFS4ERR_BAD_STATEID) {
3253 exception.retry = 1;
3254 continue;
3255 }
3256 if (status == -NFS4ERR_EXPIRED) {
3257 nfs4_schedule_lease_recovery(server->nfs_client);
3258 exception.retry = 1;
3259 continue;
3260 }
3261 if (status == -EAGAIN) {
3262 /* We must have found a delegation */
3263 exception.retry = 1;
3264 continue;
3265 }
3266 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3267 continue;
3268 res = ERR_PTR(nfs4_handle_exception(server,
3269 status, &exception));
3270 } while (exception.retry);
3271 return res;
3272 }
3273
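/*
* Issue a single SETATTR call. When the request changes the file size, a
* delegation or open/lock stateid is supplied so that the server can apply
* its open mode checks; otherwise the zero stateid is used.
*/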
3274 static int _nfs4_do_setattr(struct inode *inode,
3275 struct nfs_setattrargs *arg,
3276 struct nfs_setattrres *res,
3277 const struct cred *cred,
3278 struct nfs_open_context *ctx)
3279 {
3280 struct nfs_server *server = NFS_SERVER(inode);
3281 struct rpc_message msg = {
3282 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3283 .rpc_argp = arg,
3284 .rpc_resp = res,
3285 .rpc_cred = cred,
3286 };
3287 const struct cred *delegation_cred = NULL;
3288 unsigned long timestamp = jiffies;
3289 bool truncate;
3290 int status;
3291
3292 nfs_fattr_init(res->fattr);
3293
3294 /* Servers should only apply open mode checks for file size changes */
3295 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3296 if (!truncate) {
3297 nfs4_inode_make_writeable(inode);
3298 goto zero_stateid;
3299 }
3300
3301 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3302 /* Use that stateid */
3303 } else if (ctx != NULL && ctx->state) {
3304 struct nfs_lock_context *l_ctx;
3305 if (!nfs4_valid_open_stateid(ctx->state))
3306 return -EBADF;
3307 l_ctx = nfs_get_lock_context(ctx);
3308 if (IS_ERR(l_ctx))
3309 return PTR_ERR(l_ctx);
3310 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3311 &arg->stateid, &delegation_cred);
3312 nfs_put_lock_context(l_ctx);
3313 if (status == -EIO)
3314 return -EBADF;
3315 else if (status == -EAGAIN)
3316 goto zero_stateid;
3317 } else {
3318 zero_stateid:
3319 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3320 }
3321 if (delegation_cred)
3322 msg.rpc_cred = delegation_cred;
3323
3324 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3325
3326 put_cred(delegation_cred);
3327 if (status == 0 && ctx != NULL)
3328 renew_lease(server, timestamp);
3329 trace_nfs4_setattr(inode, &arg->stateid, status);
3330 return status;
3331 }
3332
3333 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3334 struct nfs_fattr *fattr, struct iattr *sattr,
3335 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3336 struct nfs4_label *olabel)
3337 {
3338 struct nfs_server *server = NFS_SERVER(inode);
3339 __u32 bitmask[NFS4_BITMASK_SZ];
3340 struct nfs4_state *state = ctx ? ctx->state : NULL;
3341 struct nfs_setattrargs arg = {
3342 .fh = NFS_FH(inode),
3343 .iap = sattr,
3344 .server = server,
3345 .bitmask = bitmask,
3346 .label = ilabel,
3347 };
3348 struct nfs_setattrres res = {
3349 .fattr = fattr,
3350 .label = olabel,
3351 .server = server,
3352 };
3353 struct nfs4_exception exception = {
3354 .state = state,
3355 .inode = inode,
3356 .stateid = &arg.stateid,
3357 };
3358 int err;
3359
3360 do {
3361 nfs4_bitmap_copy_adjust_setattr(bitmask,
3362 nfs4_bitmask(server, olabel),
3363 inode);
3364
3365 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3366 switch (err) {
3367 case -NFS4ERR_OPENMODE:
3368 if (!(sattr->ia_valid & ATTR_SIZE)) {
3369 pr_warn_once("NFSv4: server %s is incorrectly "
3370 "applying open mode checks to "
3371 "a SETATTR that is not "
3372 "changing file size.\n",
3373 server->nfs_client->cl_hostname);
3374 }
3375 if (state && !(state->state & FMODE_WRITE)) {
3376 err = -EBADF;
3377 if (sattr->ia_valid & ATTR_OPEN)
3378 err = -EACCES;
3379 goto out;
3380 }
3381 }
3382 err = nfs4_handle_exception(server, err, &exception);
3383 } while (exception.retry);
3384 out:
3385 return err;
3386 }
3387
3388 static bool
3389 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3390 {
3391 if (inode == NULL || !nfs_have_layout(inode))
3392 return false;
3393
3394 return pnfs_wait_on_layoutreturn(inode, task);
3395 }
3396
3397 /*
3398 * Update the seqid of an open stateid
3399 */
3400 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3401 struct nfs4_state *state)
3402 {
3403 __be32 seqid_open;
3404 u32 dst_seqid;
3405 int seq;
3406
3407 for (;;) {
3408 if (!nfs4_valid_open_stateid(state))
3409 break;
3410 seq = read_seqbegin(&state->seqlock);
3411 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3412 nfs4_stateid_copy(dst, &state->open_stateid);
3413 if (read_seqretry(&state->seqlock, seq))
3414 continue;
3415 break;
3416 }
3417 seqid_open = state->open_stateid.seqid;
3418 if (read_seqretry(&state->seqlock, seq))
3419 continue;
3420
3421 dst_seqid = be32_to_cpu(dst->seqid);
3422 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3423 dst->seqid = seqid_open;
3424 break;
3425 }
3426 }
3427
3428 /*
3429 * Update the seqid of an open stateid after receiving
3430 * NFS4ERR_OLD_STATEID
3431 */
3432 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3433 struct nfs4_state *state)
3434 {
3435 __be32 seqid_open;
3436 u32 dst_seqid;
3437 bool ret;
3438 int seq, status = -EAGAIN;
3439 DEFINE_WAIT(wait);
3440
3441 for (;;) {
3442 ret = false;
3443 if (!nfs4_valid_open_stateid(state))
3444 break;
3445 seq = read_seqbegin(&state->seqlock);
3446 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3447 if (read_seqretry(&state->seqlock, seq))
3448 continue;
3449 break;
3450 }
3451
3452 write_seqlock(&state->seqlock);
3453 seqid_open = state->open_stateid.seqid;
3454
3455 dst_seqid = be32_to_cpu(dst->seqid);
3456
3457 /* Did another OPEN bump the state's seqid? try again: */
3458 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3459 dst->seqid = seqid_open;
3460 write_sequnlock(&state->seqlock);
3461 ret = true;
3462 break;
3463 }
3464
3465 /* server says we're behind but we haven't seen the update yet */
3466 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3467 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3468 write_sequnlock(&state->seqlock);
3469 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3470
3471 if (signal_pending(current))
3472 status = -EINTR;
3473 else
3474 if (schedule_timeout(5*HZ) != 0)
3475 status = 0;
3476
3477 finish_wait(&state->waitq, &wait);
3478
3479 if (!status)
3480 continue;
3481 if (status == -EINTR)
3482 break;
3483
3484 /* We slept the whole 5 seconds, so we must have missed a seqid update. */
3485 dst->seqid = cpu_to_be32(dst_seqid + 1);
3486 ret = true;
3487 break;
3488 }
3489
3490 return ret;
3491 }
3492
3493 struct nfs4_closedata {
3494 struct inode *inode;
3495 struct nfs4_state *state;
3496 struct nfs_closeargs arg;
3497 struct nfs_closeres res;
3498 struct {
3499 struct nfs4_layoutreturn_args arg;
3500 struct nfs4_layoutreturn_res res;
3501 struct nfs4_xdr_opaque_data ld_private;
3502 u32 roc_barrier;
3503 bool roc;
3504 } lr;
3505 struct nfs_fattr fattr;
3506 unsigned long timestamp;
3507 };
3508
3509 static void nfs4_free_closedata(void *data)
3510 {
3511 struct nfs4_closedata *calldata = data;
3512 struct nfs4_state_owner *sp = calldata->state->owner;
3513 struct super_block *sb = calldata->state->inode->i_sb;
3514
3515 if (calldata->lr.roc)
3516 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3517 calldata->res.lr_ret);
3518 nfs4_put_open_state(calldata->state);
3519 nfs_free_seqid(calldata->arg.seqid);
3520 nfs4_put_state_owner(sp);
3521 nfs_sb_deactive(sb);
3522 kfree(calldata);
3523 }
3524
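/*
* CLOSE/OPEN_DOWNGRADE completion: handle any layoutreturn carried by the
* compound, update or clear the recorded open stateid, and restart the RPC
* for errors that can be retried.
*/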
3525 static void nfs4_close_done(struct rpc_task *task, void *data)
3526 {
3527 struct nfs4_closedata *calldata = data;
3528 struct nfs4_state *state = calldata->state;
3529 struct nfs_server *server = NFS_SERVER(calldata->inode);
3530 nfs4_stateid *res_stateid = NULL;
3531 struct nfs4_exception exception = {
3532 .state = state,
3533 .inode = calldata->inode,
3534 .stateid = &calldata->arg.stateid,
3535 };
3536
3537 dprintk("%s: begin!\n", __func__);
3538 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3539 return;
3540 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3541
3542 /* Handle Layoutreturn errors */
3543 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3544 &calldata->res.lr_ret) == -EAGAIN)
3545 goto out_restart;
3546
3547 /* We are done with the inode and are in the process of freeing
3548 * the state_owner. We keep this around to process errors
3549 */
3550 switch (task->tk_status) {
3551 case 0:
3552 res_stateid = &calldata->res.stateid;
3553 renew_lease(server, calldata->timestamp);
3554 break;
3555 case -NFS4ERR_ACCESS:
3556 if (calldata->arg.bitmask != NULL) {
3557 calldata->arg.bitmask = NULL;
3558 calldata->res.fattr = NULL;
3559 goto out_restart;
3560
3561 }
3562 break;
3563 case -NFS4ERR_OLD_STATEID:
3564 /* Did we race with OPEN? */
3565 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3566 state))
3567 goto out_restart;
3568 goto out_release;
3569 case -NFS4ERR_ADMIN_REVOKED:
3570 case -NFS4ERR_STALE_STATEID:
3571 case -NFS4ERR_EXPIRED:
3572 nfs4_free_revoked_stateid(server,
3573 &calldata->arg.stateid,
3574 task->tk_msg.rpc_cred);
3575 fallthrough;
3576 case -NFS4ERR_BAD_STATEID:
3577 if (calldata->arg.fmode == 0)
3578 break;
3579 fallthrough;
3580 default:
3581 task->tk_status = nfs4_async_handle_exception(task,
3582 server, task->tk_status, &exception);
3583 if (exception.retry)
3584 goto out_restart;
3585 }
3586 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3587 res_stateid, calldata->arg.fmode);
3588 out_release:
3589 task->tk_status = 0;
3590 nfs_release_seqid(calldata->arg.seqid);
3591 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3592 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3593 return;
3594 out_restart:
3595 task->tk_status = 0;
3596 rpc_restart_call_prepare(task);
3597 goto out_release;
3598 }
3599
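/*
* Decide whether a full CLOSE or only an OPEN_DOWNGRADE is needed, based on
* which share modes are still in use, and fill in the RPC arguments
* accordingly. If nothing needs to go on the wire, the task exits without
* action.
*/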
3600 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3601 {
3602 struct nfs4_closedata *calldata = data;
3603 struct nfs4_state *state = calldata->state;
3604 struct inode *inode = calldata->inode;
3605 struct pnfs_layout_hdr *lo;
3606 bool is_rdonly, is_wronly, is_rdwr;
3607 int call_close = 0;
3608
3609 dprintk("%s: begin!\n", __func__);
3610 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3611 goto out_wait;
3612
3613 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3614 spin_lock(&state->owner->so_lock);
3615 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3616 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3617 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3618 /* Calculate the change in open mode */
3619 calldata->arg.fmode = 0;
3620 if (state->n_rdwr == 0) {
3621 if (state->n_rdonly == 0)
3622 call_close |= is_rdonly;
3623 else if (is_rdonly)
3624 calldata->arg.fmode |= FMODE_READ;
3625 if (state->n_wronly == 0)
3626 call_close |= is_wronly;
3627 else if (is_wronly)
3628 calldata->arg.fmode |= FMODE_WRITE;
3629 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3630 call_close |= is_rdwr;
3631 } else if (is_rdwr)
3632 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3633
3634 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3635 if (!nfs4_valid_open_stateid(state))
3636 call_close = 0;
3637 spin_unlock(&state->owner->so_lock);
3638
3639 if (!call_close) {
3640 /* Note: exit _without_ calling nfs4_close_done */
3641 goto out_no_action;
3642 }
3643
3644 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3645 nfs_release_seqid(calldata->arg.seqid);
3646 goto out_wait;
3647 }
3648
3649 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3650 if (lo && !pnfs_layout_is_valid(lo)) {
3651 calldata->arg.lr_args = NULL;
3652 calldata->res.lr_res = NULL;
3653 }
3654
3655 if (calldata->arg.fmode == 0)
3656 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3657
3658 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3659 /* Close-to-open cache consistency revalidation */
3660 if (!nfs4_have_delegation(inode, FMODE_READ)) {
3661 calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3662 nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
3663 } else
3664 calldata->arg.bitmask = NULL;
3665 }
3666
3667 calldata->arg.share_access =
3668 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3669 calldata->arg.fmode, 0);
3670
3671 if (calldata->res.fattr == NULL)
3672 calldata->arg.bitmask = NULL;
3673 else if (calldata->arg.bitmask == NULL)
3674 calldata->res.fattr = NULL;
3675 calldata->timestamp = jiffies;
3676 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3677 &calldata->arg.seq_args,
3678 &calldata->res.seq_res,
3679 task) != 0)
3680 nfs_release_seqid(calldata->arg.seqid);
3681 dprintk("%s: done!\n", __func__);
3682 return;
3683 out_no_action:
3684 task->tk_action = NULL;
3685 out_wait:
3686 nfs4_sequence_done(task, &calldata->res.seq_res);
3687 }
3688
3689 static const struct rpc_call_ops nfs4_close_ops = {
3690 .rpc_call_prepare = nfs4_close_prepare,
3691 .rpc_call_done = nfs4_close_done,
3692 .rpc_release = nfs4_free_closedata,
3693 };
3694
3695 /*
3696 * It is possible for data to be read/written from a mem-mapped file
3697 * after the sys_close call (which hits the vfs layer as a flush).
3698 * This means that we can't safely call nfsv4 close on a file until
3699 * the inode is cleared. This in turn means that we are not good
3700 * NFSv4 citizens - we do not indicate to the server to update the file's
3701 * share state even when we are done with one of the three share
3702 * stateid's in the inode.
3703 *
3704 * NOTE: Caller must be holding the sp->so_owner semaphore!
3705 */
3706 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3707 {
3708 struct nfs_server *server = NFS_SERVER(state->inode);
3709 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3710 struct nfs4_closedata *calldata;
3711 struct nfs4_state_owner *sp = state->owner;
3712 struct rpc_task *task;
3713 struct rpc_message msg = {
3714 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3715 .rpc_cred = state->owner->so_cred,
3716 };
3717 struct rpc_task_setup task_setup_data = {
3718 .rpc_client = server->client,
3719 .rpc_message = &msg,
3720 .callback_ops = &nfs4_close_ops,
3721 .workqueue = nfsiod_workqueue,
3722 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3723 };
3724 int status = -ENOMEM;
3725
3726 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3727 &task_setup_data.rpc_client, &msg);
3728
3729 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3730 if (calldata == NULL)
3731 goto out;
3732 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3733 calldata->inode = state->inode;
3734 calldata->state = state;
3735 calldata->arg.fh = NFS_FH(state->inode);
3736 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3737 goto out_free_calldata;
3738 /* Serialization for the sequence id */
3739 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3740 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3741 if (IS_ERR(calldata->arg.seqid))
3742 goto out_free_calldata;
3743 nfs_fattr_init(&calldata->fattr);
3744 calldata->arg.fmode = 0;
3745 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3746 calldata->res.fattr = &calldata->fattr;
3747 calldata->res.seqid = calldata->arg.seqid;
3748 calldata->res.server = server;
3749 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3750 calldata->lr.roc = pnfs_roc(state->inode,
3751 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3752 if (calldata->lr.roc) {
3753 calldata->arg.lr_args = &calldata->lr.arg;
3754 calldata->res.lr_res = &calldata->lr.res;
3755 }
3756 nfs_sb_active(calldata->inode->i_sb);
3757
3758 msg.rpc_argp = &calldata->arg;
3759 msg.rpc_resp = &calldata->res;
3760 task_setup_data.callback_data = calldata;
3761 task = rpc_run_task(&task_setup_data);
3762 if (IS_ERR(task))
3763 return PTR_ERR(task);
3764 status = 0;
3765 if (wait)
3766 status = rpc_wait_for_completion_task(task);
3767 rpc_put_task(task);
3768 return status;
3769 out_free_calldata:
3770 kfree(calldata);
3771 out:
3772 nfs4_put_open_state(state);
3773 nfs4_put_state_owner(sp);
3774 return status;
3775 }
3776
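/*
 * Open helper used by the VFS atomic open path: set up the security
 * label for a possible create, perform the OPEN via nfs4_do_open(),
 * and return the resulting inode (or an ERR_PTR on failure).
 */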
3777 static struct inode *
3778 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3779 int open_flags, struct iattr *attr, int *opened)
3780 {
3781 struct nfs4_state *state;
3782 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3783
3784 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3785
3786 /* Protect against concurrent sillydeletes */
3787 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3788
3789 nfs4_label_release_security(label);
3790
3791 if (IS_ERR(state))
3792 return ERR_CAST(state);
3793 return state->inode;
3794 }
3795
3796 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3797 {
3798 if (ctx->state == NULL)
3799 return;
3800 if (is_sync)
3801 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3802 else
3803 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3804 }
3805
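/*
 * Per-minor-version masks covering every attribute bit up to and
 * including the highest one that minor version defines; they are used
 * below to clamp whatever bitmap the server claims to support.
 */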
3806 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3807 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3808 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
3809
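/*
 * Query the server's supported attributes, ACL support, filehandle
 * expiry type and exclusive-create attributes, then translate the
 * sanitized bitmaps into NFS_CAP_* flags and the server's cached
 * attribute and cache-consistency bitmasks.
 */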
3810 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3811 {
3812 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3813 struct nfs4_server_caps_arg args = {
3814 .fhandle = fhandle,
3815 .bitmask = bitmask,
3816 };
3817 struct nfs4_server_caps_res res = {};
3818 struct rpc_message msg = {
3819 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3820 .rpc_argp = &args,
3821 .rpc_resp = &res,
3822 };
3823 int status;
3824 int i;
3825
3826 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3827 FATTR4_WORD0_FH_EXPIRE_TYPE |
3828 FATTR4_WORD0_LINK_SUPPORT |
3829 FATTR4_WORD0_SYMLINK_SUPPORT |
3830 FATTR4_WORD0_ACLSUPPORT;
3831 if (minorversion)
3832 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3833
3834 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3835 if (status == 0) {
3836 /* Sanity check the server answers */
3837 switch (minorversion) {
3838 case 0:
3839 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3840 res.attr_bitmask[2] = 0;
3841 break;
3842 case 1:
3843 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3844 break;
3845 case 2:
3846 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3847 }
3848 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3849 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3850 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3851 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3852 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3853 NFS_CAP_CTIME|NFS_CAP_MTIME|
3854 NFS_CAP_SECURITY_LABEL);
3855 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3856 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3857 server->caps |= NFS_CAP_ACLS;
3858 if (res.has_links != 0)
3859 server->caps |= NFS_CAP_HARDLINKS;
3860 if (res.has_symlinks != 0)
3861 server->caps |= NFS_CAP_SYMLINKS;
3862 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3863 server->caps |= NFS_CAP_FILEID;
3864 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3865 server->caps |= NFS_CAP_MODE;
3866 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3867 server->caps |= NFS_CAP_NLINK;
3868 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3869 server->caps |= NFS_CAP_OWNER;
3870 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3871 server->caps |= NFS_CAP_OWNER_GROUP;
3872 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3873 server->caps |= NFS_CAP_ATIME;
3874 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3875 server->caps |= NFS_CAP_CTIME;
3876 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3877 server->caps |= NFS_CAP_MTIME;
3878 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3879 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3880 server->caps |= NFS_CAP_SECURITY_LABEL;
3881 #endif
3882 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3883 sizeof(server->attr_bitmask));
3884 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3885
3886 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3887 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3888 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3889 server->cache_consistency_bitmask[2] = 0;
3890
3891 /* Avoid a regression due to buggy server */
3892 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3893 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3894 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3895 sizeof(server->exclcreat_bitmask));
3896
3897 server->acl_bitmask = res.acl_bitmask;
3898 server->fh_expire_type = res.fh_expire_type;
3899 }
3900
3901 return status;
3902 }
3903
3904 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3905 {
3906 struct nfs4_exception exception = {
3907 .interruptible = true,
3908 };
3909 int err;
3910 do {
3911 err = nfs4_handle_exception(server,
3912 _nfs4_server_capabilities(server, fhandle),
3913 &exception);
3914 } while (exception.retry);
3915 return err;
3916 }
3917
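/*
 * Look up the server's pseudo-filesystem root: fill in @fhandle and
 * info->fattr from a LOOKUP_ROOT call, leaving the security label to
 * be fetched by a later getattr.
 */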
3918 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3919 struct nfs_fsinfo *info)
3920 {
3921 u32 bitmask[3];
3922 struct nfs4_lookup_root_arg args = {
3923 .bitmask = bitmask,
3924 };
3925 struct nfs4_lookup_res res = {
3926 .server = server,
3927 .fattr = info->fattr,
3928 .fh = fhandle,
3929 };
3930 struct rpc_message msg = {
3931 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3932 .rpc_argp = &args,
3933 .rpc_resp = &res,
3934 };
3935
3936 bitmask[0] = nfs4_fattr_bitmap[0];
3937 bitmask[1] = nfs4_fattr_bitmap[1];
3938 /*
3939 * The security label will be fetched by the upcoming getattr, so don't request it here
3940 */
3941 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3942
3943 nfs_fattr_init(info->fattr);
3944 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3945 }
3946
3947 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3948 struct nfs_fsinfo *info)
3949 {
3950 struct nfs4_exception exception = {
3951 .interruptible = true,
3952 };
3953 int err;
3954 do {
3955 err = _nfs4_lookup_root(server, fhandle, info);
3956 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3957 switch (err) {
3958 case 0:
3959 case -NFS4ERR_WRONGSEC:
3960 goto out;
3961 default:
3962 err = nfs4_handle_exception(server, err, &exception);
3963 }
3964 } while (exception.retry);
3965 out:
3966 return err;
3967 }
3968
3969 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3970 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3971 {
3972 struct rpc_auth_create_args auth_args = {
3973 .pseudoflavor = flavor,
3974 };
3975 struct rpc_auth *auth;
3976
3977 auth = rpcauth_create(&auth_args, server->client);
3978 if (IS_ERR(auth))
3979 return -EACCES;
3980 return nfs4_lookup_root(server, fhandle, info);
3981 }
3982
3983 /*
3984 * Retry pseudoroot lookup with various security flavors. We do this when:
3985 *
3986 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3987 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3988 *
3989 * Returns zero on success, or a negative NFS4ERR value, or a
3990 * negative errno value.
3991 */
3992 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3993 struct nfs_fsinfo *info)
3994 {
3995 /* Per 3530bis 15.33.5 */
3996 static const rpc_authflavor_t flav_array[] = {
3997 RPC_AUTH_GSS_KRB5P,
3998 RPC_AUTH_GSS_KRB5I,
3999 RPC_AUTH_GSS_KRB5,
4000 RPC_AUTH_UNIX, /* courtesy */
4001 RPC_AUTH_NULL,
4002 };
4003 int status = -EPERM;
4004 size_t i;
4005
4006 if (server->auth_info.flavor_len > 0) {
4007 /* try each flavor specified by user */
4008 for (i = 0; i < server->auth_info.flavor_len; i++) {
4009 status = nfs4_lookup_root_sec(server, fhandle, info,
4010 server->auth_info.flavors[i]);
4011 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4012 continue;
4013 break;
4014 }
4015 } else {
4016 /* no flavors specified by user, try default list */
4017 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4018 status = nfs4_lookup_root_sec(server, fhandle, info,
4019 flav_array[i]);
4020 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4021 continue;
4022 break;
4023 }
4024 }
4025
4026 /*
4027 * -EACCES could mean that the user doesn't have correct permissions
4028 * to access the mount. It could also mean that we tried to mount
4029 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
4030 * existing mount programs don't handle -EACCES very well so it should
4031 * be mapped to -EPERM instead.
4032 */
4033 if (status == -EACCES)
4034 status = -EPERM;
4035 return status;
4036 }
4037
4038 /**
4039 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4040 * @server: initialized nfs_server handle
4041 * @fhandle: we fill in the pseudo-fs root file handle
4042 * @info: we fill in an FSINFO struct
4043 * @auth_probe: probe the auth flavours
4044 *
4045 * Returns zero on success, or a negative errno.
4046 */
4047 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4048 struct nfs_fsinfo *info,
4049 bool auth_probe)
4050 {
4051 int status = 0;
4052
4053 if (!auth_probe)
4054 status = nfs4_lookup_root(server, fhandle, info);
4055
4056 if (auth_probe || status == -NFS4ERR_WRONGSEC)
4057 status = server->nfs_client->cl_mvops->find_root_sec(server,
4058 fhandle, info);
4059
4060 if (status == 0)
4061 status = nfs4_server_capabilities(server, fhandle);
4062 if (status == 0)
4063 status = nfs4_do_fsinfo(server, fhandle, info);
4064
4065 return nfs4_map_errors(status);
4066 }
4067
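/*
 * Fetch server capabilities plus the attributes of the mounted-on
 * file handle, and record the reported fsid in the nfs_server if it
 * differs from what we currently have.
 */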
4068 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4069 struct nfs_fsinfo *info)
4070 {
4071 int error;
4072 struct nfs_fattr *fattr = info->fattr;
4073 struct nfs4_label *label = fattr->label;
4074
4075 error = nfs4_server_capabilities(server, mntfh);
4076 if (error < 0) {
4077 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4078 return error;
4079 }
4080
4081 error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL);
4082 if (error < 0) {
4083 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4084 goto out;
4085 }
4086
4087 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4088 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4089 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4090
4091 out:
4092 return error;
4093 }
4094
4095 /*
4096 * Get locations and (maybe) other attributes of a referral.
4097 * Note that we'll actually follow the referral later when
4098 * we detect fsid mismatch in inode revalidation
4099 */
4100 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4101 const struct qstr *name, struct nfs_fattr *fattr,
4102 struct nfs_fh *fhandle)
4103 {
4104 int status = -ENOMEM;
4105 struct page *page = NULL;
4106 struct nfs4_fs_locations *locations = NULL;
4107
4108 page = alloc_page(GFP_KERNEL);
4109 if (page == NULL)
4110 goto out;
4111 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4112 if (locations == NULL)
4113 goto out;
4114
4115 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4116 if (status != 0)
4117 goto out;
4118
4119 /*
4120 * If the fsid didn't change, this is a migration event, not a
4121 * referral. Cause us to drop into the exception handler, which
4122 * will kick off migration recovery.
4123 */
4124 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
4125 dprintk("%s: server did not return a different fsid for"
4126 " a referral at %s\n", __func__, name->name);
4127 status = -NFS4ERR_MOVED;
4128 goto out;
4129 }
4130 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4131 nfs_fixup_referral_attributes(&locations->fattr);
4132
4133 /* replace the lookup nfs_fattr with the locations nfs_fattr */
4134 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
4135 memset(fhandle, 0, sizeof(struct nfs_fh));
4136 out:
4137 if (page)
4138 __free_page(page);
4139 kfree(locations);
4140 return status;
4141 }
4142
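/*
 * Plain GETATTR.  The request bitmask is trimmed against the inode's
 * cached state via nfs4_bitmap_copy_adjust(), and the call may time
 * out instead of retrying forever on 'softreval' mounts.
 */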
4143 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4144 struct nfs_fattr *fattr, struct nfs4_label *label,
4145 struct inode *inode)
4146 {
4147 __u32 bitmask[NFS4_BITMASK_SZ];
4148 struct nfs4_getattr_arg args = {
4149 .fh = fhandle,
4150 .bitmask = bitmask,
4151 };
4152 struct nfs4_getattr_res res = {
4153 .fattr = fattr,
4154 .label = label,
4155 .server = server,
4156 };
4157 struct rpc_message msg = {
4158 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4159 .rpc_argp = &args,
4160 .rpc_resp = &res,
4161 };
4162 unsigned short task_flags = 0;
4163
4164 /* Is this an attribute revalidation, subject to softreval? */
4165 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4166 task_flags |= RPC_TASK_TIMEOUT;
4167
4168 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
4169
4170 nfs_fattr_init(fattr);
4171 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4172 return nfs4_do_call_sync(server->client, server, &msg,
4173 &args.seq_args, &res.seq_res, task_flags);
4174 }
4175
4176 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4177 struct nfs_fattr *fattr, struct nfs4_label *label,
4178 struct inode *inode)
4179 {
4180 struct nfs4_exception exception = {
4181 .interruptible = true,
4182 };
4183 int err;
4184 do {
4185 err = _nfs4_proc_getattr(server, fhandle, fattr, label, inode);
4186 trace_nfs4_getattr(server, fhandle, fattr, err);
4187 err = nfs4_handle_exception(server, err,
4188 &exception);
4189 } while (exception.retry);
4190 return err;
4191 }
4192
4193 /*
4194 * The file is not closed if it is opened due to a request to change
4195 * the size of the file. The open call will not be needed once the
4196 * VFS layer lookup-intents are implemented.
4197 *
4198 * Close is called when the inode is destroyed.
4199 * If we haven't opened the file for O_WRONLY, we need to do so in the
4200 * size_change case in order to obtain a stateid.
4201 *
4202 * Got race?
4203 * Because OPEN is always done by name in nfsv4, it is
4204 * possible that we opened a different file by the same
4205 * name. We can recognize this race condition, but we
4206 * can't do anything about it besides returning an error.
4207 *
4208 * This will be fixed with VFS changes (lookup-intent).
4209 */
4210 static int
4211 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4212 struct iattr *sattr)
4213 {
4214 struct inode *inode = d_inode(dentry);
4215 const struct cred *cred = NULL;
4216 struct nfs_open_context *ctx = NULL;
4217 struct nfs4_label *label = NULL;
4218 int status;
4219
4220 if (pnfs_ld_layoutret_on_setattr(inode) &&
4221 sattr->ia_valid & ATTR_SIZE &&
4222 sattr->ia_size < i_size_read(inode))
4223 pnfs_commit_and_return_layout(inode);
4224
4225 nfs_fattr_init(fattr);
4226
4227 /* Deal with open(O_TRUNC) */
4228 if (sattr->ia_valid & ATTR_OPEN)
4229 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4230
4231 /* Optimization: if the end result is no change, don't RPC */
4232 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4233 return 0;
4234
4235 /* Search for an existing open(O_WRITE) file */
4236 if (sattr->ia_valid & ATTR_FILE) {
4237
4238 ctx = nfs_file_open_context(sattr->ia_file);
4239 if (ctx)
4240 cred = ctx->cred;
4241 }
4242
4243 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4244 if (IS_ERR(label))
4245 return PTR_ERR(label);
4246
4247 /* Return any delegations if we're going to change ACLs */
4248 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4249 nfs4_inode_make_writeable(inode);
4250
4251 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
4252 if (status == 0) {
4253 nfs_setattr_update_inode(inode, sattr, fattr);
4254 nfs_setsecurity(inode, fattr, label);
4255 }
4256 nfs4_label_free(label);
4257 return status;
4258 }
4259
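/*
 * LOOKUP a single name in @dir, returning the child's file handle,
 * attributes and (optionally) security label.  Soft-revalidation
 * lookups are allowed to time out rather than retry indefinitely.
 */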
4260 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4261 struct dentry *dentry, struct nfs_fh *fhandle,
4262 struct nfs_fattr *fattr, struct nfs4_label *label)
4263 {
4264 struct nfs_server *server = NFS_SERVER(dir);
4265 int status;
4266 struct nfs4_lookup_arg args = {
4267 .bitmask = server->attr_bitmask,
4268 .dir_fh = NFS_FH(dir),
4269 .name = &dentry->d_name,
4270 };
4271 struct nfs4_lookup_res res = {
4272 .server = server,
4273 .fattr = fattr,
4274 .label = label,
4275 .fh = fhandle,
4276 };
4277 struct rpc_message msg = {
4278 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4279 .rpc_argp = &args,
4280 .rpc_resp = &res,
4281 };
4282 unsigned short task_flags = 0;
4283
4284 /* Is this an attribute revalidation, subject to softreval? */
4285 if (nfs_lookup_is_soft_revalidate(dentry))
4286 task_flags |= RPC_TASK_TIMEOUT;
4287
4288 args.bitmask = nfs4_bitmask(server, label);
4289
4290 nfs_fattr_init(fattr);
4291
4292 dprintk("NFS call lookup %pd2\n", dentry);
4293 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4294 status = nfs4_do_call_sync(clnt, server, &msg,
4295 &args.seq_args, &res.seq_res, task_flags);
4296 dprintk("NFS reply lookup: %d\n", status);
4297 return status;
4298 }
4299
4300 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4301 {
4302 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4303 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4304 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4305 fattr->nlink = 2;
4306 }
4307
4308 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4309 struct dentry *dentry, struct nfs_fh *fhandle,
4310 struct nfs_fattr *fattr, struct nfs4_label *label)
4311 {
4312 struct nfs4_exception exception = {
4313 .interruptible = true,
4314 };
4315 struct rpc_clnt *client = *clnt;
4316 const struct qstr *name = &dentry->d_name;
4317 int err;
4318 do {
4319 err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label);
4320 trace_nfs4_lookup(dir, name, err);
4321 switch (err) {
4322 case -NFS4ERR_BADNAME:
4323 err = -ENOENT;
4324 goto out;
4325 case -NFS4ERR_MOVED:
4326 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4327 if (err == -NFS4ERR_MOVED)
4328 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4329 goto out;
4330 case -NFS4ERR_WRONGSEC:
4331 err = -EPERM;
4332 if (client != *clnt)
4333 goto out;
4334 client = nfs4_negotiate_security(client, dir, name);
4335 if (IS_ERR(client))
4336 return PTR_ERR(client);
4337
4338 exception.retry = 1;
4339 break;
4340 default:
4341 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4342 }
4343 } while (exception.retry);
4344
4345 out:
4346 if (err == 0)
4347 *clnt = client;
4348 else if (client != *clnt)
4349 rpc_shutdown_client(client);
4350
4351 return err;
4352 }
4353
4354 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
4355 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4356 struct nfs4_label *label)
4357 {
4358 int status;
4359 struct rpc_clnt *client = NFS_CLIENT(dir);
4360
4361 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
4362 if (client != NFS_CLIENT(dir)) {
4363 rpc_shutdown_client(client);
4364 nfs_fixup_secinfo_attributes(fattr);
4365 }
4366 return status;
4367 }
4368
4369 struct rpc_clnt *
4370 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4371 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4372 {
4373 struct rpc_clnt *client = NFS_CLIENT(dir);
4374 int status;
4375
4376 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL);
4377 if (status < 0)
4378 return ERR_PTR(status);
4379 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4380 }
4381
4382 static int _nfs4_proc_lookupp(struct inode *inode,
4383 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4384 struct nfs4_label *label)
4385 {
4386 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4387 struct nfs_server *server = NFS_SERVER(inode);
4388 int status;
4389 struct nfs4_lookupp_arg args = {
4390 .bitmask = server->attr_bitmask,
4391 .fh = NFS_FH(inode),
4392 };
4393 struct nfs4_lookupp_res res = {
4394 .server = server,
4395 .fattr = fattr,
4396 .label = label,
4397 .fh = fhandle,
4398 };
4399 struct rpc_message msg = {
4400 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4401 .rpc_argp = &args,
4402 .rpc_resp = &res,
4403 };
4404 unsigned short task_flags = 0;
4405
4406 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4407 task_flags |= RPC_TASK_TIMEOUT;
4408
4409 args.bitmask = nfs4_bitmask(server, label);
4410
4411 nfs_fattr_init(fattr);
4412
4413 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4414 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4415 &res.seq_res, task_flags);
4416 dprintk("NFS reply lookupp: %d\n", status);
4417 return status;
4418 }
4419
4420 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4421 struct nfs_fattr *fattr, struct nfs4_label *label)
4422 {
4423 struct nfs4_exception exception = {
4424 .interruptible = true,
4425 };
4426 int err;
4427 do {
4428 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4429 trace_nfs4_lookupp(inode, err);
4430 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4431 &exception);
4432 } while (exception.retry);
4433 return err;
4434 }
4435
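/*
 * ACCESS check.  Unless we hold a read delegation, a GETATTR using the
 * cache-consistency bitmask is piggy-backed on the call so the cached
 * inode attributes can be refreshed from the same round trip.
 */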
4436 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4437 {
4438 struct nfs_server *server = NFS_SERVER(inode);
4439 struct nfs4_accessargs args = {
4440 .fh = NFS_FH(inode),
4441 .access = entry->mask,
4442 };
4443 struct nfs4_accessres res = {
4444 .server = server,
4445 };
4446 struct rpc_message msg = {
4447 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4448 .rpc_argp = &args,
4449 .rpc_resp = &res,
4450 .rpc_cred = entry->cred,
4451 };
4452 int status = 0;
4453
4454 if (!nfs4_have_delegation(inode, FMODE_READ)) {
4455 res.fattr = nfs_alloc_fattr();
4456 if (res.fattr == NULL)
4457 return -ENOMEM;
4458 args.bitmask = server->cache_consistency_bitmask;
4459 }
4460 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4461 if (!status) {
4462 nfs_access_set_mask(entry, res.access);
4463 if (res.fattr)
4464 nfs_refresh_inode(inode, res.fattr);
4465 }
4466 nfs_free_fattr(res.fattr);
4467 return status;
4468 }
4469
4470 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4471 {
4472 struct nfs4_exception exception = {
4473 .interruptible = true,
4474 };
4475 int err;
4476 do {
4477 err = _nfs4_proc_access(inode, entry);
4478 trace_nfs4_access(inode, err);
4479 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4480 &exception);
4481 } while (exception.retry);
4482 return err;
4483 }
4484
4485 /*
4486 * TODO: For the time being, we don't try to get any attributes
4487 * along with any of the zero-copy operations READ, READDIR,
4488 * READLINK, WRITE.
4489 *
4490 * In the case of the first three, we want to put the GETATTR
4491 * after the read-type operation -- this is because it is hard
4492 * to predict the length of a GETATTR response in v4, and thus
4493 * align the READ data correctly. This means that the GETATTR
4494 * may end up partially falling into the page cache, and we should
4495 * shift it into the 'tail' of the xdr_buf before processing.
4496 * To do this efficiently, we need to know the total length
4497 * of data received, which doesn't seem to be available outside
4498 * of the RPC layer.
4499 *
4500 * In the case of WRITE, we also want to put the GETATTR after
4501 * the operation -- in this case because we want to make sure
4502 * we get the post-operation mtime and size.
4503 *
4504 * Both of these changes to the XDR layer would in fact be quite
4505 * minor, but I decided to leave them for a subsequent patch.
4506 */
4507 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4508 unsigned int pgbase, unsigned int pglen)
4509 {
4510 struct nfs4_readlink args = {
4511 .fh = NFS_FH(inode),
4512 .pgbase = pgbase,
4513 .pglen = pglen,
4514 .pages = &page,
4515 };
4516 struct nfs4_readlink_res res;
4517 struct rpc_message msg = {
4518 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4519 .rpc_argp = &args,
4520 .rpc_resp = &res,
4521 };
4522
4523 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4524 }
4525
4526 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4527 unsigned int pgbase, unsigned int pglen)
4528 {
4529 struct nfs4_exception exception = {
4530 .interruptible = true,
4531 };
4532 int err;
4533 do {
4534 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4535 trace_nfs4_readlink(inode, err);
4536 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4537 &exception);
4538 } while (exception.retry);
4539 return err;
4540 }
4541
4542 /*
4543 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4544 */
4545 static int
4546 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4547 int flags)
4548 {
4549 struct nfs_server *server = NFS_SERVER(dir);
4550 struct nfs4_label l, *ilabel = NULL;
4551 struct nfs_open_context *ctx;
4552 struct nfs4_state *state;
4553 int status = 0;
4554
4555 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4556 if (IS_ERR(ctx))
4557 return PTR_ERR(ctx);
4558
4559 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4560
4561 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4562 sattr->ia_mode &= ~current_umask();
4563 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4564 if (IS_ERR(state)) {
4565 status = PTR_ERR(state);
4566 goto out;
4567 }
4568 out:
4569 nfs4_label_release_security(ilabel);
4570 put_nfs_open_context(ctx);
4571 return status;
4572 }
4573
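/*
 * REMOVE @name from @dir.  On success the directory's change attribute
 * is updated from the returned change_info, and the cached nlink is
 * decremented when a subdirectory was removed.
 */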
4574 static int
4575 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4576 {
4577 struct nfs_server *server = NFS_SERVER(dir);
4578 struct nfs_removeargs args = {
4579 .fh = NFS_FH(dir),
4580 .name = *name,
4581 };
4582 struct nfs_removeres res = {
4583 .server = server,
4584 };
4585 struct rpc_message msg = {
4586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4587 .rpc_argp = &args,
4588 .rpc_resp = &res,
4589 };
4590 unsigned long timestamp = jiffies;
4591 int status;
4592
4593 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4594 if (status == 0) {
4595 spin_lock(&dir->i_lock);
4596 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4597 NFS_INO_INVALID_DATA);
4598 /* Removing a directory decrements nlink in the parent */
4599 if (ftype == NF4DIR && dir->i_nlink > 2)
4600 nfs4_dec_nlink_locked(dir);
4601 spin_unlock(&dir->i_lock);
4602 }
4603 return status;
4604 }
4605
4606 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4607 {
4608 struct nfs4_exception exception = {
4609 .interruptible = true,
4610 };
4611 struct inode *inode = d_inode(dentry);
4612 int err;
4613
4614 if (inode) {
4615 if (inode->i_nlink == 1)
4616 nfs4_inode_return_delegation(inode);
4617 else
4618 nfs4_inode_make_writeable(inode);
4619 }
4620 do {
4621 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4622 trace_nfs4_remove(dir, &dentry->d_name, err);
4623 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4624 &exception);
4625 } while (exception.retry);
4626 return err;
4627 }
4628
4629 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4630 {
4631 struct nfs4_exception exception = {
4632 .interruptible = true,
4633 };
4634 int err;
4635
4636 do {
4637 err = _nfs4_proc_remove(dir, name, NF4DIR);
4638 trace_nfs4_remove(dir, name, err);
4639 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4640 &exception);
4641 } while (exception.retry);
4642 return err;
4643 }
4644
4645 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4646 struct dentry *dentry,
4647 struct inode *inode)
4648 {
4649 struct nfs_removeargs *args = msg->rpc_argp;
4650 struct nfs_removeres *res = msg->rpc_resp;
4651
4652 res->server = NFS_SB(dentry->d_sb);
4653 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4654 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4655
4656 nfs_fattr_init(res->dir_attr);
4657
4658 if (inode)
4659 nfs4_inode_return_delegation(inode);
4660 }
4661
4662 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4663 {
4664 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4665 &data->args.seq_args,
4666 &data->res.seq_res,
4667 task);
4668 }
4669
4670 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4671 {
4672 struct nfs_unlinkdata *data = task->tk_calldata;
4673 struct nfs_removeres *res = &data->res;
4674
4675 if (!nfs4_sequence_done(task, &res->seq_res))
4676 return 0;
4677 if (nfs4_async_handle_error(task, res->server, NULL,
4678 &data->timeout) == -EAGAIN)
4679 return 0;
4680 if (task->tk_status == 0)
4681 nfs4_update_changeattr(dir, &res->cinfo,
4682 res->dir_attr->time_start,
4683 NFS_INO_INVALID_DATA);
4684 return 1;
4685 }
4686
4687 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4688 struct dentry *old_dentry,
4689 struct dentry *new_dentry)
4690 {
4691 struct nfs_renameargs *arg = msg->rpc_argp;
4692 struct nfs_renameres *res = msg->rpc_resp;
4693 struct inode *old_inode = d_inode(old_dentry);
4694 struct inode *new_inode = d_inode(new_dentry);
4695
4696 if (old_inode)
4697 nfs4_inode_make_writeable(old_inode);
4698 if (new_inode)
4699 nfs4_inode_return_delegation(new_inode);
4700 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4701 res->server = NFS_SB(old_dentry->d_sb);
4702 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4703 }
4704
4705 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4706 {
4707 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4708 &data->args.seq_args,
4709 &data->res.seq_res,
4710 task);
4711 }
4712
4713 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4714 struct inode *new_dir)
4715 {
4716 struct nfs_renamedata *data = task->tk_calldata;
4717 struct nfs_renameres *res = &data->res;
4718
4719 if (!nfs4_sequence_done(task, &res->seq_res))
4720 return 0;
4721 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4722 return 0;
4723
4724 if (task->tk_status == 0) {
4725 if (new_dir != old_dir) {
4726 /* Note: If we moved a directory, nlink will change */
4727 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4728 res->old_fattr->time_start,
4729 NFS_INO_INVALID_OTHER |
4730 NFS_INO_INVALID_DATA);
4731 nfs4_update_changeattr(new_dir, &res->new_cinfo,
4732 res->new_fattr->time_start,
4733 NFS_INO_INVALID_OTHER |
4734 NFS_INO_INVALID_DATA);
4735 } else
4736 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4737 res->old_fattr->time_start,
4738 NFS_INO_INVALID_DATA);
4739 }
4740 return 1;
4741 }
4742
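/*
 * LINK @inode into @dir under @name, then update the directory's
 * change attribute and refresh the inode's post-op attributes and
 * security label.
 */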
4743 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4744 {
4745 struct nfs_server *server = NFS_SERVER(inode);
4746 __u32 bitmask[NFS4_BITMASK_SZ];
4747 struct nfs4_link_arg arg = {
4748 .fh = NFS_FH(inode),
4749 .dir_fh = NFS_FH(dir),
4750 .name = name,
4751 .bitmask = bitmask,
4752 };
4753 struct nfs4_link_res res = {
4754 .server = server,
4755 .label = NULL,
4756 };
4757 struct rpc_message msg = {
4758 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4759 .rpc_argp = &arg,
4760 .rpc_resp = &res,
4761 };
4762 int status = -ENOMEM;
4763
4764 res.fattr = nfs_alloc_fattr();
4765 if (res.fattr == NULL)
4766 goto out;
4767
4768 res.label = nfs4_label_alloc(server, GFP_KERNEL);
4769 if (IS_ERR(res.label)) {
4770 status = PTR_ERR(res.label);
4771 goto out;
4772 }
4773
4774 nfs4_inode_make_writeable(inode);
4775 nfs4_bitmap_copy_adjust_setattr(bitmask, nfs4_bitmask(server, res.label), inode);
4776
4777 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4778 if (!status) {
4779 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
4780 NFS_INO_INVALID_DATA);
4781 status = nfs_post_op_update_inode(inode, res.fattr);
4782 if (!status)
4783 nfs_setsecurity(inode, res.fattr, res.label);
4784 }
4785
4787 nfs4_label_free(res.label);
4788
4789 out:
4790 nfs_free_fattr(res.fattr);
4791 return status;
4792 }
4793
4794 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4795 {
4796 struct nfs4_exception exception = {
4797 .interruptible = true,
4798 };
4799 int err;
4800 do {
4801 err = nfs4_handle_exception(NFS_SERVER(inode),
4802 _nfs4_proc_link(inode, dir, name),
4803 &exception);
4804 } while (exception.retry);
4805 return err;
4806 }
4807
4808 struct nfs4_createdata {
4809 struct rpc_message msg;
4810 struct nfs4_create_arg arg;
4811 struct nfs4_create_res res;
4812 struct nfs_fh fh;
4813 struct nfs_fattr fattr;
4814 struct nfs4_label *label;
4815 };
4816
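/*
 * Allocate and pre-populate the argument/result container shared by
 * the CREATE-based operations (symlink, mkdir and mknod).
 */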
4817 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4818 const struct qstr *name, struct iattr *sattr, u32 ftype)
4819 {
4820 struct nfs4_createdata *data;
4821
4822 data = kzalloc(sizeof(*data), GFP_KERNEL);
4823 if (data != NULL) {
4824 struct nfs_server *server = NFS_SERVER(dir);
4825
4826 data->label = nfs4_label_alloc(server, GFP_KERNEL);
4827 if (IS_ERR(data->label))
4828 goto out_free;
4829
4830 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4831 data->msg.rpc_argp = &data->arg;
4832 data->msg.rpc_resp = &data->res;
4833 data->arg.dir_fh = NFS_FH(dir);
4834 data->arg.server = server;
4835 data->arg.name = name;
4836 data->arg.attrs = sattr;
4837 data->arg.ftype = ftype;
4838 data->arg.bitmask = nfs4_bitmask(server, data->label);
4839 data->arg.umask = current_umask();
4840 data->res.server = server;
4841 data->res.fh = &data->fh;
4842 data->res.fattr = &data->fattr;
4843 data->res.label = data->label;
4844 nfs_fattr_init(data->res.fattr);
4845 }
4846 return data;
4847 out_free:
4848 kfree(data);
4849 return NULL;
4850 }
4851
4852 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4853 {
4854 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4855 &data->arg.seq_args, &data->res.seq_res, 1);
4856 if (status == 0) {
4857 spin_lock(&dir->i_lock);
4858 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
4859 data->res.fattr->time_start,
4860 NFS_INO_INVALID_DATA);
4861 /* Creating a directory bumps nlink in the parent */
4862 if (data->arg.ftype == NF4DIR)
4863 nfs4_inc_nlink_locked(dir);
4864 spin_unlock(&dir->i_lock);
4865 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4866 }
4867 return status;
4868 }
4869
4870 static void nfs4_free_createdata(struct nfs4_createdata *data)
4871 {
4872 nfs4_label_free(data->label);
4873 kfree(data);
4874 }
4875
4876 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4877 struct page *page, unsigned int len, struct iattr *sattr,
4878 struct nfs4_label *label)
4879 {
4880 struct nfs4_createdata *data;
4881 int status = -ENAMETOOLONG;
4882
4883 if (len > NFS4_MAXPATHLEN)
4884 goto out;
4885
4886 status = -ENOMEM;
4887 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4888 if (data == NULL)
4889 goto out;
4890
4891 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4892 data->arg.u.symlink.pages = &page;
4893 data->arg.u.symlink.len = len;
4894 data->arg.label = label;
4895
4896 status = nfs4_do_create(dir, dentry, data);
4897
4898 nfs4_free_createdata(data);
4899 out:
4900 return status;
4901 }
4902
4903 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4904 struct page *page, unsigned int len, struct iattr *sattr)
4905 {
4906 struct nfs4_exception exception = {
4907 .interruptible = true,
4908 };
4909 struct nfs4_label l, *label = NULL;
4910 int err;
4911
4912 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4913
4914 do {
4915 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
4916 trace_nfs4_symlink(dir, &dentry->d_name, err);
4917 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4918 &exception);
4919 } while (exception.retry);
4920
4921 nfs4_label_release_security(label);
4922 return err;
4923 }
4924
4925 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4926 struct iattr *sattr, struct nfs4_label *label)
4927 {
4928 struct nfs4_createdata *data;
4929 int status = -ENOMEM;
4930
4931 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4932 if (data == NULL)
4933 goto out;
4934
4935 data->arg.label = label;
4936 status = nfs4_do_create(dir, dentry, data);
4937
4938 nfs4_free_createdata(data);
4939 out:
4940 return status;
4941 }
4942
4943 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4944 struct iattr *sattr)
4945 {
4946 struct nfs_server *server = NFS_SERVER(dir);
4947 struct nfs4_exception exception = {
4948 .interruptible = true,
4949 };
4950 struct nfs4_label l, *label = NULL;
4951 int err;
4952
4953 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4954
4955 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4956 sattr->ia_mode &= ~current_umask();
4957 do {
4958 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4959 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4960 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4961 &exception);
4962 } while (exception.retry);
4963 nfs4_label_release_security(label);
4964
4965 return err;
4966 }
4967
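/*
 * READDIR into the caller-supplied pages.  Security labels are left
 * out of the attribute bitmask when the server does not support them,
 * and the directory's atime is invalidated after the call.
 */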
4968 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
4969 struct nfs_readdir_res *nr_res)
4970 {
4971 struct inode *dir = d_inode(nr_arg->dentry);
4972 struct nfs_server *server = NFS_SERVER(dir);
4973 struct nfs4_readdir_arg args = {
4974 .fh = NFS_FH(dir),
4975 .pages = nr_arg->pages,
4976 .pgbase = 0,
4977 .count = nr_arg->page_len,
4978 .plus = nr_arg->plus,
4979 };
4980 struct nfs4_readdir_res res;
4981 struct rpc_message msg = {
4982 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4983 .rpc_argp = &args,
4984 .rpc_resp = &res,
4985 .rpc_cred = nr_arg->cred,
4986 };
4987 int status;
4988
4989 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
4990 nr_arg->dentry, (unsigned long long)nr_arg->cookie);
4991 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
4992 args.bitmask = server->attr_bitmask_nl;
4993 else
4994 args.bitmask = server->attr_bitmask;
4995
4996 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
4997 res.pgbase = args.pgbase;
4998 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
4999 &res.seq_res, 0);
5000 if (status >= 0) {
5001 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5002 status += args.pgbase;
5003 }
5004
5005 nfs_invalidate_atime(dir);
5006
5007 dprintk("%s: returns %d\n", __func__, status);
5008 return status;
5009 }
5010
5011 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5012 struct nfs_readdir_res *res)
5013 {
5014 struct nfs4_exception exception = {
5015 .interruptible = true,
5016 };
5017 int err;
5018 do {
5019 err = _nfs4_proc_readdir(arg, res);
5020 trace_nfs4_readdir(d_inode(arg->dentry), err);
5021 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5022 err, &exception);
5023 } while (exception.retry);
5024 return err;
5025 }
5026
5027 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5028 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5029 {
5030 struct nfs4_createdata *data;
5031 int mode = sattr->ia_mode;
5032 int status = -ENOMEM;
5033
5034 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5035 if (data == NULL)
5036 goto out;
5037
5038 if (S_ISFIFO(mode))
5039 data->arg.ftype = NF4FIFO;
5040 else if (S_ISBLK(mode)) {
5041 data->arg.ftype = NF4BLK;
5042 data->arg.u.device.specdata1 = MAJOR(rdev);
5043 data->arg.u.device.specdata2 = MINOR(rdev);
5044 }
5045 else if (S_ISCHR(mode)) {
5046 data->arg.ftype = NF4CHR;
5047 data->arg.u.device.specdata1 = MAJOR(rdev);
5048 data->arg.u.device.specdata2 = MINOR(rdev);
5049 } else if (!S_ISSOCK(mode)) {
5050 status = -EINVAL;
5051 goto out_free;
5052 }
5053
5054 data->arg.label = label;
5055 status = nfs4_do_create(dir, dentry, data);
5056 out_free:
5057 nfs4_free_createdata(data);
5058 out:
5059 return status;
5060 }
5061
5062 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5063 struct iattr *sattr, dev_t rdev)
5064 {
5065 struct nfs_server *server = NFS_SERVER(dir);
5066 struct nfs4_exception exception = {
5067 .interruptible = true,
5068 };
5069 struct nfs4_label l, *label = NULL;
5070 int err;
5071
5072 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5073
5074 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5075 sattr->ia_mode &= ~current_umask();
5076 do {
5077 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5078 trace_nfs4_mknod(dir, &dentry->d_name, err);
5079 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5080 &exception);
5081 } while (exception.retry);
5082
5083 nfs4_label_release_security(label);
5084
5085 return err;
5086 }
5087
5088 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5089 struct nfs_fsstat *fsstat)
5090 {
5091 struct nfs4_statfs_arg args = {
5092 .fh = fhandle,
5093 .bitmask = server->attr_bitmask,
5094 };
5095 struct nfs4_statfs_res res = {
5096 .fsstat = fsstat,
5097 };
5098 struct rpc_message msg = {
5099 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5100 .rpc_argp = &args,
5101 .rpc_resp = &res,
5102 };
5103
5104 nfs_fattr_init(fsstat->fattr);
5105 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5106 }
5107
5108 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5109 {
5110 struct nfs4_exception exception = {
5111 .interruptible = true,
5112 };
5113 int err;
5114 do {
5115 err = nfs4_handle_exception(server,
5116 _nfs4_proc_statfs(server, fhandle, fsstat),
5117 &exception);
5118 } while (exception.retry);
5119 return err;
5120 }
5121
5122 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5123 struct nfs_fsinfo *fsinfo)
5124 {
5125 struct nfs4_fsinfo_arg args = {
5126 .fh = fhandle,
5127 .bitmask = server->attr_bitmask,
5128 };
5129 struct nfs4_fsinfo_res res = {
5130 .fsinfo = fsinfo,
5131 };
5132 struct rpc_message msg = {
5133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5134 .rpc_argp = &args,
5135 .rpc_resp = &res,
5136 };
5137
5138 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5139 }
5140
5141 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5142 {
5143 struct nfs4_exception exception = {
5144 .interruptible = true,
5145 };
5146 int err;
5147
5148 do {
5149 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5150 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5151 if (err == 0) {
5152 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5153 break;
5154 }
5155 err = nfs4_handle_exception(server, err, &exception);
5156 } while (exception.retry);
5157 return err;
5158 }
5159
5160 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5161 {
5162 int error;
5163
5164 nfs_fattr_init(fsinfo->fattr);
5165 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5166 if (error == 0) {
5167 /* block layout checks this! */
5168 server->pnfs_blksize = fsinfo->blksize;
5169 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5170 }
5171
5172 return error;
5173 }
5174
5175 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5176 struct nfs_pathconf *pathconf)
5177 {
5178 struct nfs4_pathconf_arg args = {
5179 .fh = fhandle,
5180 .bitmask = server->attr_bitmask,
5181 };
5182 struct nfs4_pathconf_res res = {
5183 .pathconf = pathconf,
5184 };
5185 struct rpc_message msg = {
5186 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5187 .rpc_argp = &args,
5188 .rpc_resp = &res,
5189 };
5190
5191 /* None of the pathconf attributes are mandatory to implement */
5192 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5193 memset(pathconf, 0, sizeof(*pathconf));
5194 return 0;
5195 }
5196
5197 nfs_fattr_init(pathconf->fattr);
5198 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5199 }
5200
5201 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5202 struct nfs_pathconf *pathconf)
5203 {
5204 struct nfs4_exception exception = {
5205 .interruptible = true,
5206 };
5207 int err;
5208
5209 do {
5210 err = nfs4_handle_exception(server,
5211 _nfs4_proc_pathconf(server, fhandle, pathconf),
5212 &exception);
5213 } while (exception.retry);
5214 return err;
5215 }
5216
5217 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5218 const struct nfs_open_context *ctx,
5219 const struct nfs_lock_context *l_ctx,
5220 fmode_t fmode)
5221 {
5222 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5223 }
5224 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5225
5226 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5227 const struct nfs_open_context *ctx,
5228 const struct nfs_lock_context *l_ctx,
5229 fmode_t fmode)
5230 {
5231 nfs4_stateid _current_stateid;
5232
5233 /* If the current stateid represents a lost lock, then exit */
5234 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5235 return true;
5236 return nfs4_stateid_match(stateid, &_current_stateid);
5237 }
5238
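/*
 * Errors indicating that the stateid used for a READ or WRITE is no
 * longer usable and the I/O should be retried with a fresh stateid.
 */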
5239 static bool nfs4_error_stateid_expired(int err)
5240 {
5241 switch (err) {
5242 case -NFS4ERR_DELEG_REVOKED:
5243 case -NFS4ERR_ADMIN_REVOKED:
5244 case -NFS4ERR_BAD_STATEID:
5245 case -NFS4ERR_STALE_STATEID:
5246 case -NFS4ERR_OLD_STATEID:
5247 case -NFS4ERR_OPENMODE:
5248 case -NFS4ERR_EXPIRED:
5249 return true;
5250 }
5251 return false;
5252 }
5253
5254 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5255 {
5256 struct nfs_server *server = NFS_SERVER(hdr->inode);
5257
5258 trace_nfs4_read(hdr, task->tk_status);
5259 if (task->tk_status < 0) {
5260 struct nfs4_exception exception = {
5261 .inode = hdr->inode,
5262 .state = hdr->args.context->state,
5263 .stateid = &hdr->args.stateid,
5264 };
5265 task->tk_status = nfs4_async_handle_exception(task,
5266 server, task->tk_status, &exception);
5267 if (exception.retry) {
5268 rpc_restart_call_prepare(task);
5269 return -EAGAIN;
5270 }
5271 }
5272
5273 if (task->tk_status > 0)
5274 renew_lease(server, hdr->timestamp);
5275 return 0;
5276 }
5277
5278 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5279 struct nfs_pgio_args *args)
5280 {
5281
5282 if (!nfs4_error_stateid_expired(task->tk_status) ||
5283 nfs4_stateid_is_current(&args->stateid,
5284 args->context,
5285 args->lock_context,
5286 FMODE_READ))
5287 return false;
5288 rpc_restart_call_prepare(task);
5289 return true;
5290 }
5291
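/*
 * If the server rejected a READ_PLUS with -ENOTSUPP, drop the
 * NFS_CAP_READ_PLUS capability, fall back to plain READ and restart
 * the RPC.
 */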
5292 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5293 struct nfs_pgio_header *hdr)
5294 {
5295 struct nfs_server *server = NFS_SERVER(hdr->inode);
5296 struct rpc_message *msg = &task->tk_msg;
5297
5298 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5299 server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
5300 server->caps &= ~NFS_CAP_READ_PLUS;
5301 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5302 rpc_restart_call_prepare(task);
5303 return true;
5304 }
5305 return false;
5306 }
5307
5308 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5309 {
5310 dprintk("--> %s\n", __func__);
5311
5312 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5313 return -EAGAIN;
5314 if (nfs4_read_stateid_changed(task, &hdr->args))
5315 return -EAGAIN;
5316 if (nfs4_read_plus_not_supported(task, hdr))
5317 return -EAGAIN;
5318 if (task->tk_status > 0)
5319 nfs_invalidate_atime(hdr->inode);
5320 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5321 nfs4_read_done_cb(task, hdr);
5322 }
5323
5324 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5325 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5326 struct rpc_message *msg)
5327 {
5328 /* Note: We don't use READ_PLUS with pNFS yet */
5329 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
5330 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5331 }
5332 #else
5333 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5334 struct rpc_message *msg)
5335 {
5336 }
5337 #endif /* CONFIG_NFS_V4_2 && CONFIG_NFS_V4_2_READ_PLUS */
5338
5339 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5340 struct rpc_message *msg)
5341 {
5342 hdr->timestamp = jiffies;
5343 if (!hdr->pgio_done_cb)
5344 hdr->pgio_done_cb = nfs4_read_done_cb;
5345 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5346 nfs42_read_plus_support(hdr, msg);
5347 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5348 }
5349
5350 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5351 struct nfs_pgio_header *hdr)
5352 {
5353 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5354 &hdr->args.seq_args,
5355 &hdr->res.seq_res,
5356 task))
5357 return 0;
5358 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5359 hdr->args.lock_context,
5360 hdr->rw_mode) == -EIO)
5361 return -EIO;
5362 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5363 return -EIO;
5364 return 0;
5365 }
5366
5367 static int nfs4_write_done_cb(struct rpc_task *task,
5368 struct nfs_pgio_header *hdr)
5369 {
5370 struct inode *inode = hdr->inode;
5371
5372 trace_nfs4_write(hdr, task->tk_status);
5373 if (task->tk_status < 0) {
5374 struct nfs4_exception exception = {
5375 .inode = hdr->inode,
5376 .state = hdr->args.context->state,
5377 .stateid = &hdr->args.stateid,
5378 };
5379 task->tk_status = nfs4_async_handle_exception(task,
5380 NFS_SERVER(inode), task->tk_status,
5381 &exception);
5382 if (exception.retry) {
5383 rpc_restart_call_prepare(task);
5384 return -EAGAIN;
5385 }
5386 }
5387 if (task->tk_status >= 0) {
5388 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5389 nfs_writeback_update_inode(hdr);
5390 }
5391 return 0;
5392 }
5393
5394 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5395 struct nfs_pgio_args *args)
5396 {
5397
5398 if (!nfs4_error_stateid_expired(task->tk_status) ||
5399 nfs4_stateid_is_current(&args->stateid,
5400 args->context,
5401 args->lock_context,
5402 FMODE_WRITE))
5403 return false;
5404 rpc_restart_call_prepare(task);
5405 return true;
5406 }
5407
5408 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5409 {
5410 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5411 return -EAGAIN;
5412 if (nfs4_write_stateid_changed(task, &hdr->args))
5413 return -EAGAIN;
5414 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5415 nfs4_write_done_cb(task, hdr);
5416 }
5417
5418 static
5419 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5420 {
5421 /* Don't request attributes for pNFS or O_DIRECT writes */
5422 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5423 return false;
5424 /* Otherwise, request attributes if and only if we don't hold
5425 * a delegation
5426 */
5427 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5428 }
5429
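/*
 * Widen @bitmask so that any attribute the inode currently caches as
 * invalid is requested again, allowing the post-op GETATTR to refresh
 * exactly the state we know to be stale.
 */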
5430 static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
5431 struct nfs_server *server,
5432 struct nfs4_label *label)
5433 {
5434
5435 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
5436
5437 if ((cache_validity & NFS_INO_INVALID_DATA) ||
5438 (cache_validity & NFS_INO_REVAL_PAGECACHE) ||
5439 (cache_validity & NFS_INO_REVAL_FORCED) ||
5440 (cache_validity & NFS_INO_INVALID_OTHER))
5441 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
5442
5443 if (cache_validity & NFS_INO_INVALID_ATIME)
5444 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5445 if (cache_validity & NFS_INO_INVALID_OTHER)
5446 bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
5447 FATTR4_WORD1_OWNER_GROUP |
5448 FATTR4_WORD1_NUMLINKS;
5449 if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
5450 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
5451 if (cache_validity & NFS_INO_INVALID_CHANGE)
5452 bitmask[0] |= FATTR4_WORD0_CHANGE;
5453 if (cache_validity & NFS_INO_INVALID_CTIME)
5454 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5455 if (cache_validity & NFS_INO_INVALID_MTIME)
5456 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5457 if (cache_validity & NFS_INO_INVALID_SIZE)
5458 bitmask[0] |= FATTR4_WORD0_SIZE;
5459 if (cache_validity & NFS_INO_INVALID_BLOCKS)
5460 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5461 }
5462
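/*
 * Prepare an asynchronous WRITE: attach a cache-consistency GETATTR
 * bitmask unless this is a pNFS or O_DIRECT write or we hold a
 * delegation, and apply any machine-credential state protection.
 */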
5463 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5464 struct rpc_message *msg,
5465 struct rpc_clnt **clnt)
5466 {
5467 struct nfs_server *server = NFS_SERVER(hdr->inode);
5468
5469 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5470 hdr->args.bitmask = NULL;
5471 hdr->res.fattr = NULL;
5472 } else {
5473 hdr->args.bitmask = server->cache_consistency_bitmask;
5474 nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
5475 }
5476
5477 if (!hdr->pgio_done_cb)
5478 hdr->pgio_done_cb = nfs4_write_done_cb;
5479 hdr->res.server = server;
5480 hdr->timestamp = jiffies;
5481
5482 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5483 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5484 nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
5485 }
5486
5487 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5488 {
5489 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5490 &data->args.seq_args,
5491 &data->res.seq_res,
5492 task);
5493 }
5494
5495 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5496 {
5497 struct inode *inode = data->inode;
5498
5499 trace_nfs4_commit(data, task->tk_status);
5500 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5501 NULL, NULL) == -EAGAIN) {
5502 rpc_restart_call_prepare(task);
5503 return -EAGAIN;
5504 }
5505 return 0;
5506 }
5507
5508 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5509 {
5510 if (!nfs4_sequence_done(task, &data->res.seq_res))
5511 return -EAGAIN;
5512 return data->commit_done_cb(task, data);
5513 }
5514
5515 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5516 struct rpc_clnt **clnt)
5517 {
5518 struct nfs_server *server = NFS_SERVER(data->inode);
5519
5520 if (data->commit_done_cb == NULL)
5521 data->commit_done_cb = nfs4_commit_done_cb;
5522 data->res.server = server;
5523 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5524 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5525 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5526 }
5527
5528 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5529 struct nfs_commitres *res)
5530 {
5531 struct inode *dst_inode = file_inode(dst);
5532 struct nfs_server *server = NFS_SERVER(dst_inode);
5533 struct rpc_message msg = {
5534 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5535 .rpc_argp = args,
5536 .rpc_resp = res,
5537 };
5538
5539 args->fh = NFS_FH(dst_inode);
5540 return nfs4_call_sync(server->client, server, &msg,
5541 &args->seq_args, &res->seq_res, 1);
5542 }
5543
5544 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5545 {
5546 struct nfs_commitargs args = {
5547 .offset = offset,
5548 .count = count,
5549 };
5550 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5551 struct nfs4_exception exception = { };
5552 int status;
5553
5554 do {
5555 status = _nfs4_proc_commit(dst, &args, res);
5556 status = nfs4_handle_exception(dst_server, status, &exception);
5557 } while (exception.retry);
5558
5559 return status;
5560 }
5561
5562 struct nfs4_renewdata {
5563 struct nfs_client *client;
5564 unsigned long timestamp;
5565 };
5566
5567 /*
5568 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5569 * standalone procedure for queueing an asynchronous RENEW.
5570 */
5571 static void nfs4_renew_release(void *calldata)
5572 {
5573 struct nfs4_renewdata *data = calldata;
5574 struct nfs_client *clp = data->client;
5575
5576 if (refcount_read(&clp->cl_count) > 1)
5577 nfs4_schedule_state_renewal(clp);
5578 nfs_put_client(clp);
5579 kfree(data);
5580 }
5581
5582 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5583 {
5584 struct nfs4_renewdata *data = calldata;
5585 struct nfs_client *clp = data->client;
5586 unsigned long timestamp = data->timestamp;
5587
5588 trace_nfs4_renew_async(clp, task->tk_status);
5589 switch (task->tk_status) {
5590 case 0:
5591 break;
5592 case -NFS4ERR_LEASE_MOVED:
5593 nfs4_schedule_lease_moved_recovery(clp);
5594 break;
5595 default:
5596 /* Unless we're shutting down, schedule state recovery! */
5597 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5598 return;
5599 if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
5600 nfs4_schedule_lease_recovery(clp);
5601 return;
5602 }
5603 nfs4_schedule_path_down_recovery(clp);
5604 }
5605 do_renew_lease(clp, timestamp);
5606 }
5607
5608 static const struct rpc_call_ops nfs4_renew_ops = {
5609 .rpc_call_done = nfs4_renew_done,
5610 .rpc_release = nfs4_renew_release,
5611 };
5612
5613 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5614 {
5615 struct rpc_message msg = {
5616 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5617 .rpc_argp = clp,
5618 .rpc_cred = cred,
5619 };
5620 struct nfs4_renewdata *data;
5621
5622 if (renew_flags == 0)
5623 return 0;
5624 if (!refcount_inc_not_zero(&clp->cl_count))
5625 return -EIO;
5626 data = kmalloc(sizeof(*data), GFP_NOFS);
5627 if (data == NULL) {
5628 nfs_put_client(clp);
5629 return -ENOMEM;
5630 }
5631 data->client = clp;
5632 data->timestamp = jiffies;
5633 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5634 &nfs4_renew_ops, data);
5635 }
5636
5637 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5638 {
5639 struct rpc_message msg = {
5640 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5641 .rpc_argp = clp,
5642 .rpc_cred = cred,
5643 };
5644 unsigned long now = jiffies;
5645 int status;
5646
5647 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5648 if (status < 0)
5649 return status;
5650 do_renew_lease(clp, now);
5651 return 0;
5652 }
5653
5654 static inline int nfs4_server_supports_acls(struct nfs_server *server)
5655 {
5656 return server->caps & NFS_CAP_ACLS;
5657 }
5658
5659 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5660 * it's OK to put sizeof(struct page *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5661 * the stack.
5662 */
5663 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
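/*
 * Worked example (assuming the common 4 KiB PAGE_SIZE and the usual
 * XATTR_SIZE_MAX of 64 KiB): NFS4ACL_MAXPAGES is 16, so the on-stack
 * pages[] array in __nfs4_proc_set_acl() below costs 16 pointers,
 * i.e. 128 bytes on a 64-bit build.
 */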
5664
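/*
 * Copy a flat buffer into freshly allocated pages. Returns the number of
 * pages filled, or -ENOMEM on allocation failure. On success the caller
 * owns the page references and must drop them when done (as
 * __nfs4_proc_set_acl() does after the RPC completes).
 */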
5665 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5666 struct page **pages)
5667 {
5668 struct page *newpage, **spages;
5669 int rc = 0;
5670 size_t len;
5671 spages = pages;
5672
5673 do {
5674 len = min_t(size_t, PAGE_SIZE, buflen);
5675 newpage = alloc_page(GFP_KERNEL);
5676
5677 if (newpage == NULL)
5678 goto unwind;
5679 memcpy(page_address(newpage), buf, len);
5680 buf += len;
5681 buflen -= len;
5682 *pages++ = newpage;
5683 rc++;
5684 } while (buflen != 0);
5685
5686 return rc;
5687
5688 unwind:
5689 for (; rc > 0; rc--)
5690 __free_page(spages[rc-1]);
5691 return -ENOMEM;
5692 }
5693
5694 struct nfs4_cached_acl {
5695 int cached;
5696 size_t len;
5697 char data[];
5698 };
5699
5700 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5701 {
5702 struct nfs_inode *nfsi = NFS_I(inode);
5703
5704 spin_lock(&inode->i_lock);
5705 kfree(nfsi->nfs4_acl);
5706 nfsi->nfs4_acl = acl;
5707 spin_unlock(&inode->i_lock);
5708 }
5709
5710 static void nfs4_zap_acl_attr(struct inode *inode)
5711 {
5712 nfs4_set_cached_acl(inode, NULL);
5713 }
5714
5715 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5716 {
5717 struct nfs_inode *nfsi = NFS_I(inode);
5718 struct nfs4_cached_acl *acl;
5719 int ret = -ENOENT;
5720
5721 spin_lock(&inode->i_lock);
5722 acl = nfsi->nfs4_acl;
5723 if (acl == NULL)
5724 goto out;
5725 if (buf == NULL) /* user is just asking for length */
5726 goto out_len;
5727 if (acl->cached == 0)
5728 goto out;
5729 ret = -ERANGE; /* see getxattr(2) man page */
5730 if (acl->len > buflen)
5731 goto out;
5732 memcpy(buf, acl->data, acl->len);
5733 out_len:
5734 ret = acl->len;
5735 out:
5736 spin_unlock(&inode->i_lock);
5737 return ret;
5738 }
5739
5740 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5741 {
5742 struct nfs4_cached_acl *acl;
5743 size_t buflen = sizeof(*acl) + acl_len;
5744
5745 if (buflen <= PAGE_SIZE) {
5746 acl = kmalloc(buflen, GFP_KERNEL);
5747 if (acl == NULL)
5748 goto out;
5749 acl->cached = 1;
5750 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5751 } else {
5752 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5753 if (acl == NULL)
5754 goto out;
5755 acl->cached = 0;
5756 }
5757 acl->len = acl_len;
5758 out:
5759 nfs4_set_cached_acl(inode, acl);
5760 }
5761
5762 /*
5763 * The getxattr API returns the required buffer length when called with a
5764 * NULL buf. The NFSv4 ACL tool then calls getxattr again after allocating
5765 * the required buffer. On a NULL buf we still fetch the ACL from the
5766 * server, guessing that it fits in a single page. If it does, we cache up
5767 * to a page of ACL data, and the second getxattr call is serviced from
5768 * the cache. If it does not, we throw the data away and cache only the
5769 * required length; the next getxattr call then makes another round trip
5770 * to the server, this time with a buffer of the required size.
5771 */
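/*
 * A minimal userspace sketch of that two-step sequence (illustrative
 * only; assumes <sys/xattr.h> and an existing 'path'):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		char *aclbuf = malloc(len);
 *
 *		if (aclbuf)
 *			len = getxattr(path, "system.nfs4_acl", aclbuf, len);
 *	}
 *
 * The second call is the one the cache below is meant to satisfy
 * whenever the ACL fitted in a single page.
 */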
5772 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5773 {
5774 struct page **pages;
5775 struct nfs_getaclargs args = {
5776 .fh = NFS_FH(inode),
5777 .acl_len = buflen,
5778 };
5779 struct nfs_getaclres res = {
5780 .acl_len = buflen,
5781 };
5782 struct rpc_message msg = {
5783 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5784 .rpc_argp = &args,
5785 .rpc_resp = &res,
5786 };
5787 unsigned int npages;
5788 int ret = -ENOMEM, i;
5789 struct nfs_server *server = NFS_SERVER(inode);
5790
5791 if (buflen == 0)
5792 buflen = server->rsize;
5793
5794 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5795 pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS);
5796 if (!pages)
5797 return -ENOMEM;
5798
5799 args.acl_pages = pages;
5800
5801 for (i = 0; i < npages; i++) {
5802 pages[i] = alloc_page(GFP_KERNEL);
5803 if (!pages[i])
5804 goto out_free;
5805 }
5806
5807 /* for decoding across pages */
5808 res.acl_scratch = alloc_page(GFP_KERNEL);
5809 if (!res.acl_scratch)
5810 goto out_free;
5811
5812 args.acl_len = npages * PAGE_SIZE;
5813
5814 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5815 __func__, buf, buflen, npages, args.acl_len);
5816 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5817 &msg, &args.seq_args, &res.seq_res, 0);
5818 if (ret)
5819 goto out_free;
5820
5821 /* Handle the case where the passed-in buffer is too short */
5822 if (res.acl_flags & NFS4_ACL_TRUNC) {
5823 /* Did the user only issue a request for the acl length? */
5824 if (buf == NULL)
5825 goto out_ok;
5826 ret = -ERANGE;
5827 goto out_free;
5828 }
5829 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5830 if (buf) {
5831 if (res.acl_len > buflen) {
5832 ret = -ERANGE;
5833 goto out_free;
5834 }
5835 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5836 }
5837 out_ok:
5838 ret = res.acl_len;
5839 out_free:
5840 for (i = 0; i < npages; i++)
5841 if (pages[i])
5842 __free_page(pages[i]);
5843 if (res.acl_scratch)
5844 __free_page(res.acl_scratch);
5845 kfree(pages);
5846 return ret;
5847 }
5848
5849 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5850 {
5851 struct nfs4_exception exception = {
5852 .interruptible = true,
5853 };
5854 ssize_t ret;
5855 do {
5856 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5857 trace_nfs4_get_acl(inode, ret);
5858 if (ret >= 0)
5859 break;
5860 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5861 } while (exception.retry);
5862 return ret;
5863 }
5864
5865 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5866 {
5867 struct nfs_server *server = NFS_SERVER(inode);
5868 int ret;
5869
5870 if (!nfs4_server_supports_acls(server))
5871 return -EOPNOTSUPP;
5872 ret = nfs_revalidate_inode(server, inode);
5873 if (ret < 0)
5874 return ret;
5875 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
5876 nfs_zap_acl_cache(inode);
5877 ret = nfs4_read_cached_acl(inode, buf, buflen);
5878 if (ret != -ENOENT)
5879 /* -ENOENT is returned if there is no ACL or if there is an ACL
5880 * but no cached acl data, just the acl length */
5881 return ret;
5882 return nfs4_get_acl_uncached(inode, buf, buflen);
5883 }
5884
5885 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5886 {
5887 struct nfs_server *server = NFS_SERVER(inode);
5888 struct page *pages[NFS4ACL_MAXPAGES];
5889 struct nfs_setaclargs arg = {
5890 .fh = NFS_FH(inode),
5891 .acl_pages = pages,
5892 .acl_len = buflen,
5893 };
5894 struct nfs_setaclres res;
5895 struct rpc_message msg = {
5896 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
5897 .rpc_argp = &arg,
5898 .rpc_resp = &res,
5899 };
5900 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5901 int ret, i;
5902
5903 /* You can't remove system.nfs4_acl: */
5904 if (buflen == 0)
5905 return -EINVAL;
5906 if (!nfs4_server_supports_acls(server))
5907 return -EOPNOTSUPP;
5908 if (npages > ARRAY_SIZE(pages))
5909 return -ERANGE;
5910 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
5911 if (i < 0)
5912 return i;
5913 nfs4_inode_make_writeable(inode);
5914 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5915
5916 /*
5917 * Free each page after tx, so the only ref left is
5918 * held by the network stack
5919 */
5920 for (; i > 0; i--)
5921 put_page(pages[i-1]);
5922
5923 /*
5924 * An ACL update can result in an inode attribute update,
5925 * so mark the attribute cache invalid.
5926 */
5927 spin_lock(&inode->i_lock);
5928 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
5929 | NFS_INO_INVALID_CTIME
5930 | NFS_INO_REVAL_FORCED;
5931 spin_unlock(&inode->i_lock);
5932 nfs_access_zap_cache(inode);
5933 nfs_zap_acl_cache(inode);
5934 return ret;
5935 }
5936
5937 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5938 {
5939 struct nfs4_exception exception = { };
5940 int err;
5941 do {
5942 err = __nfs4_proc_set_acl(inode, buf, buflen);
5943 trace_nfs4_set_acl(inode, err);
5944 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5945 &exception);
5946 } while (exception.retry);
5947 return err;
5948 }
5949
5950 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
5951 static int _nfs4_get_security_label(struct inode *inode, void *buf,
5952 size_t buflen)
5953 {
5954 struct nfs_server *server = NFS_SERVER(inode);
5955 struct nfs_fattr fattr;
5956 struct nfs4_label label = {0, 0, buflen, buf};
5957
5958 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5959 struct nfs4_getattr_arg arg = {
5960 .fh = NFS_FH(inode),
5961 .bitmask = bitmask,
5962 };
5963 struct nfs4_getattr_res res = {
5964 .fattr = &fattr,
5965 .label = &label,
5966 .server = server,
5967 };
5968 struct rpc_message msg = {
5969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
5970 .rpc_argp = &arg,
5971 .rpc_resp = &res,
5972 };
5973 int ret;
5974
5975 nfs_fattr_init(&fattr);
5976
5977 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
5978 if (ret)
5979 return ret;
5980 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
5981 return -ENOENT;
5982 return label.len;
5983 }
5984
5985 static int nfs4_get_security_label(struct inode *inode, void *buf,
5986 size_t buflen)
5987 {
5988 struct nfs4_exception exception = {
5989 .interruptible = true,
5990 };
5991 int err;
5992
5993 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5994 return -EOPNOTSUPP;
5995
5996 do {
5997 err = _nfs4_get_security_label(inode, buf, buflen);
5998 trace_nfs4_get_security_label(inode, err);
5999 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6000 &exception);
6001 } while (exception.retry);
6002 return err;
6003 }
6004
6005 static int _nfs4_do_set_security_label(struct inode *inode,
6006 struct nfs4_label *ilabel,
6007 struct nfs_fattr *fattr,
6008 struct nfs4_label *olabel)
6009 {
6010
6011 struct iattr sattr = {0};
6012 struct nfs_server *server = NFS_SERVER(inode);
6013 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6014 struct nfs_setattrargs arg = {
6015 .fh = NFS_FH(inode),
6016 .iap = &sattr,
6017 .server = server,
6018 .bitmask = bitmask,
6019 .label = ilabel,
6020 };
6021 struct nfs_setattrres res = {
6022 .fattr = fattr,
6023 .label = olabel,
6024 .server = server,
6025 };
6026 struct rpc_message msg = {
6027 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6028 .rpc_argp = &arg,
6029 .rpc_resp = &res,
6030 };
6031 int status;
6032
6033 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6034
6035 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6036 if (status)
6037 dprintk("%s failed: %d\n", __func__, status);
6038
6039 return status;
6040 }
6041
6042 static int nfs4_do_set_security_label(struct inode *inode,
6043 struct nfs4_label *ilabel,
6044 struct nfs_fattr *fattr,
6045 struct nfs4_label *olabel)
6046 {
6047 struct nfs4_exception exception = { };
6048 int err;
6049
6050 do {
6051 err = _nfs4_do_set_security_label(inode, ilabel,
6052 fattr, olabel);
6053 trace_nfs4_set_security_label(inode, err);
6054 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6055 &exception);
6056 } while (exception.retry);
6057 return err;
6058 }
6059
6060 static int
6061 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6062 {
6063 struct nfs4_label ilabel, *olabel = NULL;
6064 struct nfs_fattr fattr;
6065 int status;
6066
6067 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6068 return -EOPNOTSUPP;
6069
6070 nfs_fattr_init(&fattr);
6071
6072 ilabel.pi = 0;
6073 ilabel.lfs = 0;
6074 ilabel.label = (char *)buf;
6075 ilabel.len = buflen;
6076
6077 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
6078 if (IS_ERR(olabel)) {
6079 status = PTR_ERR(olabel);
6080 goto out;
6081 }
6082
6083 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
6084 if (status == 0)
6085 nfs_setsecurity(inode, &fattr, olabel);
6086
6087 nfs4_label_free(olabel);
6088 out:
6089 return status;
6090 }
6091 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
6092
6093
6094 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6095 nfs4_verifier *bootverf)
6096 {
6097 __be32 verf[2];
6098
6099 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6100 /* An impossible timestamp guarantees this value
6101 * will never match a generated boot time. */
6102 verf[0] = cpu_to_be32(U32_MAX);
6103 verf[1] = cpu_to_be32(U32_MAX);
6104 } else {
6105 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6106 u64 ns = ktime_to_ns(nn->boot_time);
6107
6108 verf[0] = cpu_to_be32(ns >> 32);
6109 verf[1] = cpu_to_be32(ns);
6110 }
6111 memcpy(bootverf->data, verf, sizeof(bootverf->data));
6112 }
6113
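/*
 * Pick the client ID uniquifier: prefer the per-netns identifier when one
 * has been configured, otherwise fall back to the module-wide
 * nfs4_client_id_uniquifier. Returns the length of the string copied
 * into buf.
 */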
6114 static size_t
6115 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6116 {
6117 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6118 struct nfs_netns_client *nn_clp = nn->nfs_client;
6119 const char *id;
6120
6121 buf[0] = '\0';
6122
6123 if (nn_clp) {
6124 rcu_read_lock();
6125 id = rcu_dereference(nn_clp->identifier);
6126 if (id)
6127 strscpy(buf, id, buflen);
6128 rcu_read_unlock();
6129 }
6130
6131 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6132 strscpy(buf, nfs4_client_id_uniquifier, buflen);
6133
6134 return strlen(buf);
6135 }
6136
6137 static int
6138 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6139 {
6140 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6141 size_t buflen;
6142 size_t len;
6143 char *str;
6144
6145 if (clp->cl_owner_id != NULL)
6146 return 0;
6147
6148 rcu_read_lock();
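/*
 * The 14 covers the fixed "Linux NFSv4.0 " prefix used in the
 * scnprintf() calls below; the remaining ones cover the '/'
 * separator and the terminating NUL.
 */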
6149 len = 14 +
6150 strlen(clp->cl_rpcclient->cl_nodename) +
6151 1 +
6152 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6153 1;
6154 rcu_read_unlock();
6155
6156 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6157 if (buflen)
6158 len += buflen + 1;
6159
6160 if (len > NFS4_OPAQUE_LIMIT + 1)
6161 return -EINVAL;
6162
6163 /*
6164 * Since this string is allocated at mount time, and held until the
6165 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6166 * about a memory-reclaim deadlock.
6167 */
6168 str = kmalloc(len, GFP_KERNEL);
6169 if (!str)
6170 return -ENOMEM;
6171
6172 rcu_read_lock();
6173 if (buflen)
6174 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6175 clp->cl_rpcclient->cl_nodename, buf,
6176 rpc_peeraddr2str(clp->cl_rpcclient,
6177 RPC_DISPLAY_ADDR));
6178 else
6179 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6180 clp->cl_rpcclient->cl_nodename,
6181 rpc_peeraddr2str(clp->cl_rpcclient,
6182 RPC_DISPLAY_ADDR));
6183 rcu_read_unlock();
6184
6185 clp->cl_owner_id = str;
6186 return 0;
6187 }
6188
6189 static int
6190 nfs4_init_uniform_client_string(struct nfs_client *clp)
6191 {
6192 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6193 size_t buflen;
6194 size_t len;
6195 char *str;
6196
6197 if (clp->cl_owner_id != NULL)
6198 return 0;
6199
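/*
 * "Linux NFSv" is 10 bytes, each of the two %u version fields may need
 * up to 10 digits, and the remaining bytes cover the '.' and ' '
 * separators and the terminating NUL.
 */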
6200 len = 10 + 10 + 1 + 10 + 1 +
6201 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6202
6203 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6204 if (buflen)
6205 len += buflen + 1;
6206
6207 if (len > NFS4_OPAQUE_LIMIT + 1)
6208 return -EINVAL;
6209
6210 /*
6211 * Since this string is allocated at mount time, and held until the
6212 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6213 * about a memory-reclaim deadlock.
6214 */
6215 str = kmalloc(len, GFP_KERNEL);
6216 if (!str)
6217 return -ENOMEM;
6218
6219 if (buflen)
6220 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6221 clp->rpc_ops->version, clp->cl_minorversion,
6222 buf, clp->cl_rpcclient->cl_nodename);
6223 else
6224 scnprintf(str, len, "Linux NFSv%u.%u %s",
6225 clp->rpc_ops->version, clp->cl_minorversion,
6226 clp->cl_rpcclient->cl_nodename);
6227 clp->cl_owner_id = str;
6228 return 0;
6229 }
6230
6231 /*
6232 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6233 * services. Advertise one based on the address family of the
6234 * clientaddr.
6235 */
6236 static unsigned int
6237 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6238 {
6239 if (strchr(clp->cl_ipaddr, ':') != NULL)
6240 return scnprintf(buf, len, "tcp6");
6241 else
6242 return scnprintf(buf, len, "tcp");
6243 }
6244
6245 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6246 {
6247 struct nfs4_setclientid *sc = calldata;
6248
6249 if (task->tk_status == 0)
6250 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6251 }
6252
6253 static const struct rpc_call_ops nfs4_setclientid_ops = {
6254 .rpc_call_done = nfs4_setclientid_done,
6255 };
6256
6257 /**
6258 * nfs4_proc_setclientid - Negotiate client ID
6259 * @clp: state data structure
6260 * @program: RPC program for NFSv4 callback service
6261 * @port: IP port number for NFS4 callback service
6262 * @cred: credential to use for this call
6263 * @res: where to place the result
6264 *
6265 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6266 */
6267 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6268 unsigned short port, const struct cred *cred,
6269 struct nfs4_setclientid_res *res)
6270 {
6271 nfs4_verifier sc_verifier;
6272 struct nfs4_setclientid setclientid = {
6273 .sc_verifier = &sc_verifier,
6274 .sc_prog = program,
6275 .sc_clnt = clp,
6276 };
6277 struct rpc_message msg = {
6278 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6279 .rpc_argp = &setclientid,
6280 .rpc_resp = res,
6281 .rpc_cred = cred,
6282 };
6283 struct rpc_task_setup task_setup_data = {
6284 .rpc_client = clp->cl_rpcclient,
6285 .rpc_message = &msg,
6286 .callback_ops = &nfs4_setclientid_ops,
6287 .callback_data = &setclientid,
6288 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6289 };
6290 unsigned long now = jiffies;
6291 int status;
6292
6293 /* nfs_client_id4 */
6294 nfs4_init_boot_verifier(clp, &sc_verifier);
6295
6296 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6297 status = nfs4_init_uniform_client_string(clp);
6298 else
6299 status = nfs4_init_nonuniform_client_string(clp);
6300
6301 if (status)
6302 goto out;
6303
6304 /* cb_client4 */
6305 setclientid.sc_netid_len =
6306 nfs4_init_callback_netid(clp,
6307 setclientid.sc_netid,
6308 sizeof(setclientid.sc_netid));
6309 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6310 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6311 clp->cl_ipaddr, port >> 8, port & 255);
6312
6313 dprintk("NFS call setclientid auth=%s, '%s'\n",
6314 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6315 clp->cl_owner_id);
6316
6317 status = nfs4_call_sync_custom(&task_setup_data);
6318 if (setclientid.sc_cred) {
6319 kfree(clp->cl_acceptor);
6320 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6321 put_rpccred(setclientid.sc_cred);
6322 }
6323
6324 if (status == 0)
6325 do_renew_lease(clp, now);
6326 out:
6327 trace_nfs4_setclientid(clp, status);
6328 dprintk("NFS reply setclientid: %d\n", status);
6329 return status;
6330 }
6331
6332 /**
6333 * nfs4_proc_setclientid_confirm - Confirm client ID
6334 * @clp: state data structure
6335 * @arg: result of a previous SETCLIENTID
6336 * @cred: credential to use for this call
6337 *
6338 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6339 */
6340 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6341 struct nfs4_setclientid_res *arg,
6342 const struct cred *cred)
6343 {
6344 struct rpc_message msg = {
6345 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6346 .rpc_argp = arg,
6347 .rpc_cred = cred,
6348 };
6349 int status;
6350
6351 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6352 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6353 clp->cl_clientid);
6354 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6355 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6356 trace_nfs4_setclientid_confirm(clp, status);
6357 dprintk("NFS reply setclientid_confirm: %d\n", status);
6358 return status;
6359 }
6360
6361 struct nfs4_delegreturndata {
6362 struct nfs4_delegreturnargs args;
6363 struct nfs4_delegreturnres res;
6364 struct nfs_fh fh;
6365 nfs4_stateid stateid;
6366 unsigned long timestamp;
6367 struct {
6368 struct nfs4_layoutreturn_args arg;
6369 struct nfs4_layoutreturn_res res;
6370 struct nfs4_xdr_opaque_data ld_private;
6371 u32 roc_barrier;
6372 bool roc;
6373 } lr;
6374 struct nfs_fattr fattr;
6375 int rpc_status;
6376 struct inode *inode;
6377 };
6378
6379 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6380 {
6381 struct nfs4_delegreturndata *data = calldata;
6382 struct nfs4_exception exception = {
6383 .inode = data->inode,
6384 .stateid = &data->stateid,
6385 };
6386
6387 if (!nfs4_sequence_done(task, &data->res.seq_res))
6388 return;
6389
6390 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6391
6392 /* Handle Layoutreturn errors */
6393 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6394 &data->res.lr_ret) == -EAGAIN)
6395 goto out_restart;
6396
6397 switch (task->tk_status) {
6398 case 0:
6399 renew_lease(data->res.server, data->timestamp);
6400 break;
6401 case -NFS4ERR_ADMIN_REVOKED:
6402 case -NFS4ERR_DELEG_REVOKED:
6403 case -NFS4ERR_EXPIRED:
6404 nfs4_free_revoked_stateid(data->res.server,
6405 data->args.stateid,
6406 task->tk_msg.rpc_cred);
6407 fallthrough;
6408 case -NFS4ERR_BAD_STATEID:
6409 case -NFS4ERR_STALE_STATEID:
6410 case -ETIMEDOUT:
6411 task->tk_status = 0;
6412 break;
6413 case -NFS4ERR_OLD_STATEID:
6414 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6415 nfs4_stateid_seqid_inc(&data->stateid);
6416 if (data->args.bitmask) {
6417 data->args.bitmask = NULL;
6418 data->res.fattr = NULL;
6419 }
6420 goto out_restart;
6421 case -NFS4ERR_ACCESS:
6422 if (data->args.bitmask) {
6423 data->args.bitmask = NULL;
6424 data->res.fattr = NULL;
6425 goto out_restart;
6426 }
6427 fallthrough;
6428 default:
6429 task->tk_status = nfs4_async_handle_exception(task,
6430 data->res.server, task->tk_status,
6431 &exception);
6432 if (exception.retry)
6433 goto out_restart;
6434 }
6435 nfs_delegation_mark_returned(data->inode, data->args.stateid);
6436 data->rpc_status = task->tk_status;
6437 return;
6438 out_restart:
6439 task->tk_status = 0;
6440 rpc_restart_call_prepare(task);
6441 }
6442
6443 static void nfs4_delegreturn_release(void *calldata)
6444 {
6445 struct nfs4_delegreturndata *data = calldata;
6446 struct inode *inode = data->inode;
6447
6448 if (data->lr.roc)
6449 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6450 data->res.lr_ret);
6451 if (inode) {
6452 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
6453 nfs_iput_and_deactive(inode);
6454 }
6455 kfree(calldata);
6456 }
6457
6458 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6459 {
6460 struct nfs4_delegreturndata *d_data;
6461 struct pnfs_layout_hdr *lo;
6462
6463 d_data = (struct nfs4_delegreturndata *)data;
6464
6465 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6466 nfs4_sequence_done(task, &d_data->res.seq_res);
6467 return;
6468 }
6469
6470 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6471 if (lo && !pnfs_layout_is_valid(lo)) {
6472 d_data->args.lr_args = NULL;
6473 d_data->res.lr_res = NULL;
6474 }
6475
6476 nfs4_setup_sequence(d_data->res.server->nfs_client,
6477 &d_data->args.seq_args,
6478 &d_data->res.seq_res,
6479 task);
6480 }
6481
6482 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6483 .rpc_call_prepare = nfs4_delegreturn_prepare,
6484 .rpc_call_done = nfs4_delegreturn_done,
6485 .rpc_release = nfs4_delegreturn_release,
6486 };
6487
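/*
 * Return a delegation to the server. Where possible the compound also
 * carries a return-on-close layoutreturn (set up via pnfs_roc()) and a
 * cache-consistency GETATTR so the inode attributes remain valid once
 * the delegation is gone.
 */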
6488 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6489 {
6490 struct nfs4_delegreturndata *data;
6491 struct nfs_server *server = NFS_SERVER(inode);
6492 struct rpc_task *task;
6493 struct rpc_message msg = {
6494 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6495 .rpc_cred = cred,
6496 };
6497 struct rpc_task_setup task_setup_data = {
6498 .rpc_client = server->client,
6499 .rpc_message = &msg,
6500 .callback_ops = &nfs4_delegreturn_ops,
6501 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6502 };
6503 int status = 0;
6504
6505 data = kzalloc(sizeof(*data), GFP_NOFS);
6506 if (data == NULL)
6507 return -ENOMEM;
6508 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
6509
6510 nfs4_state_protect(server->nfs_client,
6511 NFS_SP4_MACH_CRED_CLEANUP,
6512 &task_setup_data.rpc_client, &msg);
6513
6514 data->args.fhandle = &data->fh;
6515 data->args.stateid = &data->stateid;
6516 data->args.bitmask = server->cache_consistency_bitmask;
6517 nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
6518 nfs_copy_fh(&data->fh, NFS_FH(inode));
6519 nfs4_stateid_copy(&data->stateid, stateid);
6520 data->res.fattr = &data->fattr;
6521 data->res.server = server;
6522 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6523 data->lr.arg.ld_private = &data->lr.ld_private;
6524 nfs_fattr_init(data->res.fattr);
6525 data->timestamp = jiffies;
6526 data->rpc_status = 0;
6527 data->inode = nfs_igrab_and_active(inode);
6528 if (data->inode || issync) {
6529 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6530 cred);
6531 if (data->lr.roc) {
6532 data->args.lr_args = &data->lr.arg;
6533 data->res.lr_res = &data->lr.res;
6534 }
6535 }
6536
6537 task_setup_data.callback_data = data;
6538 msg.rpc_argp = &data->args;
6539 msg.rpc_resp = &data->res;
6540 task = rpc_run_task(&task_setup_data);
6541 if (IS_ERR(task))
6542 return PTR_ERR(task);
6543 if (!issync)
6544 goto out;
6545 status = rpc_wait_for_completion_task(task);
6546 if (status != 0)
6547 goto out;
6548 status = data->rpc_status;
6549 out:
6550 rpc_put_task(task);
6551 return status;
6552 }
6553
6554 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6555 {
6556 struct nfs_server *server = NFS_SERVER(inode);
6557 struct nfs4_exception exception = { };
6558 int err;
6559 do {
6560 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6561 trace_nfs4_delegreturn(inode, stateid, err);
6562 switch (err) {
6563 case -NFS4ERR_STALE_STATEID:
6564 case -NFS4ERR_EXPIRED:
6565 case 0:
6566 return 0;
6567 }
6568 err = nfs4_handle_exception(server, err, &exception);
6569 } while (exception.retry);
6570 return err;
6571 }
6572
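/*
 * Use LOCKT to test for a conflicting lock. On success the file_lock is
 * marked F_UNLCK (no conflict); NFS4ERR_DENIED is mapped to status 0,
 * with the conflicting lock described in *request.
 */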
6573 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6574 {
6575 struct inode *inode = state->inode;
6576 struct nfs_server *server = NFS_SERVER(inode);
6577 struct nfs_client *clp = server->nfs_client;
6578 struct nfs_lockt_args arg = {
6579 .fh = NFS_FH(inode),
6580 .fl = request,
6581 };
6582 struct nfs_lockt_res res = {
6583 .denied = request,
6584 };
6585 struct rpc_message msg = {
6586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6587 .rpc_argp = &arg,
6588 .rpc_resp = &res,
6589 .rpc_cred = state->owner->so_cred,
6590 };
6591 struct nfs4_lock_state *lsp;
6592 int status;
6593
6594 arg.lock_owner.clientid = clp->cl_clientid;
6595 status = nfs4_set_lock_state(state, request);
6596 if (status != 0)
6597 goto out;
6598 lsp = request->fl_u.nfs4_fl.owner;
6599 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6600 arg.lock_owner.s_dev = server->s_dev;
6601 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6602 switch (status) {
6603 case 0:
6604 request->fl_type = F_UNLCK;
6605 break;
6606 case -NFS4ERR_DENIED:
6607 status = 0;
6608 }
6609 request->fl_ops->fl_release_private(request);
6610 request->fl_ops = NULL;
6611 out:
6612 return status;
6613 }
6614
6615 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6616 {
6617 struct nfs4_exception exception = {
6618 .interruptible = true,
6619 };
6620 int err;
6621
6622 do {
6623 err = _nfs4_proc_getlk(state, cmd, request);
6624 trace_nfs4_get_lock(request, state, cmd, err);
6625 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6626 &exception);
6627 } while (exception.retry);
6628 return err;
6629 }
6630
6631 /*
6632 * Update the seqid of a lock stateid after receiving
6633 * NFS4ERR_OLD_STATEID
6634 */
6635 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6636 struct nfs4_lock_state *lsp)
6637 {
6638 struct nfs4_state *state = lsp->ls_state;
6639 bool ret = false;
6640
6641 spin_lock(&state->state_lock);
6642 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6643 goto out;
6644 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6645 nfs4_stateid_seqid_inc(dst);
6646 else
6647 dst->seqid = lsp->ls_stateid.seqid;
6648 ret = true;
6649 out:
6650 spin_unlock(&state->state_lock);
6651 return ret;
6652 }
6653
6654 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6655 struct nfs4_lock_state *lsp)
6656 {
6657 struct nfs4_state *state = lsp->ls_state;
6658 bool ret;
6659
6660 spin_lock(&state->state_lock);
6661 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6662 nfs4_stateid_copy(dst, &lsp->ls_stateid);
6663 spin_unlock(&state->state_lock);
6664 return ret;
6665 }
6666
6667 struct nfs4_unlockdata {
6668 struct nfs_locku_args arg;
6669 struct nfs_locku_res res;
6670 struct nfs4_lock_state *lsp;
6671 struct nfs_open_context *ctx;
6672 struct nfs_lock_context *l_ctx;
6673 struct file_lock fl;
6674 struct nfs_server *server;
6675 unsigned long timestamp;
6676 };
6677
6678 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6679 struct nfs_open_context *ctx,
6680 struct nfs4_lock_state *lsp,
6681 struct nfs_seqid *seqid)
6682 {
6683 struct nfs4_unlockdata *p;
6684 struct nfs4_state *state = lsp->ls_state;
6685 struct inode *inode = state->inode;
6686
6687 p = kzalloc(sizeof(*p), GFP_NOFS);
6688 if (p == NULL)
6689 return NULL;
6690 p->arg.fh = NFS_FH(inode);
6691 p->arg.fl = &p->fl;
6692 p->arg.seqid = seqid;
6693 p->res.seqid = seqid;
6694 p->lsp = lsp;
6695 /* Ensure we don't close file until we're done freeing locks! */
6696 p->ctx = get_nfs_open_context(ctx);
6697 p->l_ctx = nfs_get_lock_context(ctx);
6698 locks_init_lock(&p->fl);
6699 locks_copy_lock(&p->fl, fl);
6700 p->server = NFS_SERVER(inode);
6701 spin_lock(&state->state_lock);
6702 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6703 spin_unlock(&state->state_lock);
6704 return p;
6705 }
6706
6707 static void nfs4_locku_release_calldata(void *data)
6708 {
6709 struct nfs4_unlockdata *calldata = data;
6710 nfs_free_seqid(calldata->arg.seqid);
6711 nfs4_put_lock_state(calldata->lsp);
6712 nfs_put_lock_context(calldata->l_ctx);
6713 put_nfs_open_context(calldata->ctx);
6714 kfree(calldata);
6715 }
6716
6717 static void nfs4_locku_done(struct rpc_task *task, void *data)
6718 {
6719 struct nfs4_unlockdata *calldata = data;
6720 struct nfs4_exception exception = {
6721 .inode = calldata->lsp->ls_state->inode,
6722 .stateid = &calldata->arg.stateid,
6723 };
6724
6725 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6726 return;
6727 switch (task->tk_status) {
6728 case 0:
6729 renew_lease(calldata->server, calldata->timestamp);
6730 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6731 if (nfs4_update_lock_stateid(calldata->lsp,
6732 &calldata->res.stateid))
6733 break;
6734 fallthrough;
6735 case -NFS4ERR_ADMIN_REVOKED:
6736 case -NFS4ERR_EXPIRED:
6737 nfs4_free_revoked_stateid(calldata->server,
6738 &calldata->arg.stateid,
6739 task->tk_msg.rpc_cred);
6740 fallthrough;
6741 case -NFS4ERR_BAD_STATEID:
6742 case -NFS4ERR_STALE_STATEID:
6743 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6744 calldata->lsp))
6745 rpc_restart_call_prepare(task);
6746 break;
6747 case -NFS4ERR_OLD_STATEID:
6748 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6749 calldata->lsp))
6750 rpc_restart_call_prepare(task);
6751 break;
6752 default:
6753 task->tk_status = nfs4_async_handle_exception(task,
6754 calldata->server, task->tk_status,
6755 &exception);
6756 if (exception.retry)
6757 rpc_restart_call_prepare(task);
6758 }
6759 nfs_release_seqid(calldata->arg.seqid);
6760 }
6761
6762 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6763 {
6764 struct nfs4_unlockdata *calldata = data;
6765
6766 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6767 nfs_async_iocounter_wait(task, calldata->l_ctx))
6768 return;
6769
6770 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6771 goto out_wait;
6772 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6773 /* Note: exit _without_ running nfs4_locku_done */
6774 goto out_no_action;
6775 }
6776 calldata->timestamp = jiffies;
6777 if (nfs4_setup_sequence(calldata->server->nfs_client,
6778 &calldata->arg.seq_args,
6779 &calldata->res.seq_res,
6780 task) != 0)
6781 nfs_release_seqid(calldata->arg.seqid);
6782 return;
6783 out_no_action:
6784 task->tk_action = NULL;
6785 out_wait:
6786 nfs4_sequence_done(task, &calldata->res.seq_res);
6787 }
6788
6789 static const struct rpc_call_ops nfs4_locku_ops = {
6790 .rpc_call_prepare = nfs4_locku_prepare,
6791 .rpc_call_done = nfs4_locku_done,
6792 .rpc_release = nfs4_locku_release_calldata,
6793 };
6794
6795 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6796 struct nfs_open_context *ctx,
6797 struct nfs4_lock_state *lsp,
6798 struct nfs_seqid *seqid)
6799 {
6800 struct nfs4_unlockdata *data;
6801 struct rpc_message msg = {
6802 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6803 .rpc_cred = ctx->cred,
6804 };
6805 struct rpc_task_setup task_setup_data = {
6806 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6807 .rpc_message = &msg,
6808 .callback_ops = &nfs4_locku_ops,
6809 .workqueue = nfsiod_workqueue,
6810 .flags = RPC_TASK_ASYNC,
6811 };
6812
6813 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6814 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6815
6816 /* Ensure this is an unlock - when canceling a lock, the
6817 * canceled lock is passed in, and it won't be an unlock.
6818 */
6819 fl->fl_type = F_UNLCK;
6820 if (fl->fl_flags & FL_CLOSE)
6821 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6822
6823 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6824 if (data == NULL) {
6825 nfs_free_seqid(seqid);
6826 return ERR_PTR(-ENOMEM);
6827 }
6828
6829 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
6830 msg.rpc_argp = &data->arg;
6831 msg.rpc_resp = &data->res;
6832 task_setup_data.callback_data = data;
6833 return rpc_run_task(&task_setup_data);
6834 }
6835
6836 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6837 {
6838 struct inode *inode = state->inode;
6839 struct nfs4_state_owner *sp = state->owner;
6840 struct nfs_inode *nfsi = NFS_I(inode);
6841 struct nfs_seqid *seqid;
6842 struct nfs4_lock_state *lsp;
6843 struct rpc_task *task;
6844 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6845 int status = 0;
6846 unsigned char fl_flags = request->fl_flags;
6847
6848 status = nfs4_set_lock_state(state, request);
6849 /* Unlock _before_ we do the RPC call */
6850 request->fl_flags |= FL_EXISTS;
6851 /* Exclude nfs_delegation_claim_locks() */
6852 mutex_lock(&sp->so_delegreturn_mutex);
6853 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
6854 down_read(&nfsi->rwsem);
6855 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
6856 up_read(&nfsi->rwsem);
6857 mutex_unlock(&sp->so_delegreturn_mutex);
6858 goto out;
6859 }
6860 up_read(&nfsi->rwsem);
6861 mutex_unlock(&sp->so_delegreturn_mutex);
6862 if (status != 0)
6863 goto out;
6864 /* Is this a delegated lock? */
6865 lsp = request->fl_u.nfs4_fl.owner;
6866 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
6867 goto out;
6868 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
6869 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
6870 status = -ENOMEM;
6871 if (IS_ERR(seqid))
6872 goto out;
6873 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
6874 status = PTR_ERR(task);
6875 if (IS_ERR(task))
6876 goto out;
6877 status = rpc_wait_for_completion_task(task);
6878 rpc_put_task(task);
6879 out:
6880 request->fl_flags = fl_flags;
6881 trace_nfs4_unlock(request, state, F_SETLK, status);
6882 return status;
6883 }
6884
6885 struct nfs4_lockdata {
6886 struct nfs_lock_args arg;
6887 struct nfs_lock_res res;
6888 struct nfs4_lock_state *lsp;
6889 struct nfs_open_context *ctx;
6890 struct file_lock fl;
6891 unsigned long timestamp;
6892 int rpc_status;
6893 int cancelled;
6894 struct nfs_server *server;
6895 };
6896
6897 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6898 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
6899 gfp_t gfp_mask)
6900 {
6901 struct nfs4_lockdata *p;
6902 struct inode *inode = lsp->ls_state->inode;
6903 struct nfs_server *server = NFS_SERVER(inode);
6904 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6905
6906 p = kzalloc(sizeof(*p), gfp_mask);
6907 if (p == NULL)
6908 return NULL;
6909
6910 p->arg.fh = NFS_FH(inode);
6911 p->arg.fl = &p->fl;
6912 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
6913 if (IS_ERR(p->arg.open_seqid))
6914 goto out_free;
6915 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
6916 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
6917 if (IS_ERR(p->arg.lock_seqid))
6918 goto out_free_seqid;
6919 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
6920 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
6921 p->arg.lock_owner.s_dev = server->s_dev;
6922 p->res.lock_seqid = p->arg.lock_seqid;
6923 p->lsp = lsp;
6924 p->server = server;
6925 p->ctx = get_nfs_open_context(ctx);
6926 locks_init_lock(&p->fl);
6927 locks_copy_lock(&p->fl, fl);
6928 return p;
6929 out_free_seqid:
6930 nfs_free_seqid(p->arg.open_seqid);
6931 out_free:
6932 kfree(p);
6933 return NULL;
6934 }
6935
6936 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
6937 {
6938 struct nfs4_lockdata *data = calldata;
6939 struct nfs4_state *state = data->lsp->ls_state;
6940
6941 dprintk("%s: begin!\n", __func__);
6942 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
6943 goto out_wait;
6944 /* Do we need to do an open_to_lock_owner? */
6945 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
6946 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
6947 goto out_release_lock_seqid;
6948 }
6949 nfs4_stateid_copy(&data->arg.open_stateid,
6950 &state->open_stateid);
6951 data->arg.new_lock_owner = 1;
6952 data->res.open_seqid = data->arg.open_seqid;
6953 } else {
6954 data->arg.new_lock_owner = 0;
6955 nfs4_stateid_copy(&data->arg.lock_stateid,
6956 &data->lsp->ls_stateid);
6957 }
6958 if (!nfs4_valid_open_stateid(state)) {
6959 data->rpc_status = -EBADF;
6960 task->tk_action = NULL;
6961 goto out_release_open_seqid;
6962 }
6963 data->timestamp = jiffies;
6964 if (nfs4_setup_sequence(data->server->nfs_client,
6965 &data->arg.seq_args,
6966 &data->res.seq_res,
6967 task) == 0)
6968 return;
6969 out_release_open_seqid:
6970 nfs_release_seqid(data->arg.open_seqid);
6971 out_release_lock_seqid:
6972 nfs_release_seqid(data->arg.lock_seqid);
6973 out_wait:
6974 nfs4_sequence_done(task, &data->res.seq_res);
6975 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
6976 }
6977
6978 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
6979 {
6980 struct nfs4_lockdata *data = calldata;
6981 struct nfs4_lock_state *lsp = data->lsp;
6982
6983 dprintk("%s: begin!\n", __func__);
6984
6985 if (!nfs4_sequence_done(task, &data->res.seq_res))
6986 return;
6987
6988 data->rpc_status = task->tk_status;
6989 switch (task->tk_status) {
6990 case 0:
6991 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
6992 data->timestamp);
6993 if (data->arg.new_lock && !data->cancelled) {
6994 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
6995 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
6996 goto out_restart;
6997 }
6998 if (data->arg.new_lock_owner != 0) {
6999 nfs_confirm_seqid(&lsp->ls_seqid, 0);
7000 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7001 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7002 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7003 goto out_restart;
7004 break;
7005 case -NFS4ERR_BAD_STATEID:
7006 case -NFS4ERR_OLD_STATEID:
7007 case -NFS4ERR_STALE_STATEID:
7008 case -NFS4ERR_EXPIRED:
7009 if (data->arg.new_lock_owner != 0) {
7010 if (!nfs4_stateid_match(&data->arg.open_stateid,
7011 &lsp->ls_state->open_stateid))
7012 goto out_restart;
7013 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7014 &lsp->ls_stateid))
7015 goto out_restart;
7016 }
7017 out_done:
7018 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
7019 return;
7020 out_restart:
7021 if (!data->cancelled)
7022 rpc_restart_call_prepare(task);
7023 goto out_done;
7024 }
7025
7026 static void nfs4_lock_release(void *calldata)
7027 {
7028 struct nfs4_lockdata *data = calldata;
7029
7030 dprintk("%s: begin!\n", __func__);
7031 nfs_free_seqid(data->arg.open_seqid);
7032 if (data->cancelled && data->rpc_status == 0) {
7033 struct rpc_task *task;
7034 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7035 data->arg.lock_seqid);
7036 if (!IS_ERR(task))
7037 rpc_put_task_async(task);
7038 dprintk("%s: cancelling lock!\n", __func__);
7039 } else
7040 nfs_free_seqid(data->arg.lock_seqid);
7041 nfs4_put_lock_state(data->lsp);
7042 put_nfs_open_context(data->ctx);
7043 kfree(data);
7044 dprintk("%s: done!\n", __func__);
7045 }
7046
7047 static const struct rpc_call_ops nfs4_lock_ops = {
7048 .rpc_call_prepare = nfs4_lock_prepare,
7049 .rpc_call_done = nfs4_lock_done,
7050 .rpc_release = nfs4_lock_release,
7051 };
7052
7053 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7054 {
7055 switch (error) {
7056 case -NFS4ERR_ADMIN_REVOKED:
7057 case -NFS4ERR_EXPIRED:
7058 case -NFS4ERR_BAD_STATEID:
7059 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7060 if (new_lock_owner != 0 ||
7061 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7062 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7063 break;
7064 case -NFS4ERR_STALE_STATEID:
7065 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7066 nfs4_schedule_lease_recovery(server->nfs_client);
7067 }
7068 }
7069
7070 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7071 {
7072 struct nfs4_lockdata *data;
7073 struct rpc_task *task;
7074 struct rpc_message msg = {
7075 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7076 .rpc_cred = state->owner->so_cred,
7077 };
7078 struct rpc_task_setup task_setup_data = {
7079 .rpc_client = NFS_CLIENT(state->inode),
7080 .rpc_message = &msg,
7081 .callback_ops = &nfs4_lock_ops,
7082 .workqueue = nfsiod_workqueue,
7083 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7084 };
7085 int ret;
7086
7087 dprintk("%s: begin!\n", __func__);
7088 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
7089 fl->fl_u.nfs4_fl.owner,
7090 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
7091 if (data == NULL)
7092 return -ENOMEM;
7093 if (IS_SETLKW(cmd))
7094 data->arg.block = 1;
7095 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7096 recovery_type > NFS_LOCK_NEW);
7097 msg.rpc_argp = &data->arg;
7098 msg.rpc_resp = &data->res;
7099 task_setup_data.callback_data = data;
7100 if (recovery_type > NFS_LOCK_NEW) {
7101 if (recovery_type == NFS_LOCK_RECLAIM)
7102 data->arg.reclaim = NFS_LOCK_RECLAIM;
7103 } else
7104 data->arg.new_lock = 1;
7105 task = rpc_run_task(&task_setup_data);
7106 if (IS_ERR(task))
7107 return PTR_ERR(task);
7108 ret = rpc_wait_for_completion_task(task);
7109 if (ret == 0) {
7110 ret = data->rpc_status;
7111 if (ret)
7112 nfs4_handle_setlk_error(data->server, data->lsp,
7113 data->arg.new_lock_owner, ret);
7114 } else
7115 data->cancelled = true;
7116 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7117 rpc_put_task(task);
7118 dprintk("%s: done, ret = %d!\n", __func__, ret);
7119 return ret;
7120 }
7121
7122 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7123 {
7124 struct nfs_server *server = NFS_SERVER(state->inode);
7125 struct nfs4_exception exception = {
7126 .inode = state->inode,
7127 };
7128 int err;
7129
7130 do {
7131 /* Cache the lock if possible... */
7132 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7133 return 0;
7134 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7135 if (err != -NFS4ERR_DELAY)
7136 break;
7137 nfs4_handle_exception(server, err, &exception);
7138 } while (exception.retry);
7139 return err;
7140 }
7141
7142 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7143 {
7144 struct nfs_server *server = NFS_SERVER(state->inode);
7145 struct nfs4_exception exception = {
7146 .inode = state->inode,
7147 };
7148 int err;
7149
7150 err = nfs4_set_lock_state(state, request);
7151 if (err != 0)
7152 return err;
7153 if (!recover_lost_locks) {
7154 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7155 return 0;
7156 }
7157 do {
7158 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7159 return 0;
7160 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7161 switch (err) {
7162 default:
7163 goto out;
7164 case -NFS4ERR_GRACE:
7165 case -NFS4ERR_DELAY:
7166 nfs4_handle_exception(server, err, &exception);
7167 err = 0;
7168 }
7169 } while (exception.retry);
7170 out:
7171 return err;
7172 }
7173
7174 #if defined(CONFIG_NFS_V4_1)
7175 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7176 {
7177 struct nfs4_lock_state *lsp;
7178 int status;
7179
7180 status = nfs4_set_lock_state(state, request);
7181 if (status != 0)
7182 return status;
7183 lsp = request->fl_u.nfs4_fl.owner;
7184 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7185 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7186 return 0;
7187 return nfs4_lock_expired(state, request);
7188 }
7189 #endif
7190
7191 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7192 {
7193 struct nfs_inode *nfsi = NFS_I(state->inode);
7194 struct nfs4_state_owner *sp = state->owner;
7195 unsigned char fl_flags = request->fl_flags;
7196 int status;
7197
7198 request->fl_flags |= FL_ACCESS;
7199 status = locks_lock_inode_wait(state->inode, request);
7200 if (status < 0)
7201 goto out;
7202 mutex_lock(&sp->so_delegreturn_mutex);
7203 down_read(&nfsi->rwsem);
7204 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7205 /* Yes: cache locks! */
7206 /* ...but avoid races with delegation recall... */
7207 request->fl_flags = fl_flags & ~FL_SLEEP;
7208 status = locks_lock_inode_wait(state->inode, request);
7209 up_read(&nfsi->rwsem);
7210 mutex_unlock(&sp->so_delegreturn_mutex);
7211 goto out;
7212 }
7213 up_read(&nfsi->rwsem);
7214 mutex_unlock(&sp->so_delegreturn_mutex);
7215 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7216 out:
7217 request->fl_flags = fl_flags;
7218 return status;
7219 }
7220
7221 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7222 {
7223 struct nfs4_exception exception = {
7224 .state = state,
7225 .inode = state->inode,
7226 .interruptible = true,
7227 };
7228 int err;
7229
7230 do {
7231 err = _nfs4_proc_setlk(state, cmd, request);
7232 if (err == -NFS4ERR_DENIED)
7233 err = -EAGAIN;
7234 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7235 err, &exception);
7236 } while (exception.retry);
7237 return err;
7238 }
7239
7240 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7241 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7242
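/*
 * Poll for a blocking lock: reissue the SETLK, doubling the sleep between
 * attempts from NFS4_LOCK_MINTIMEOUT up to NFS4_LOCK_MAXTIMEOUT, until the
 * lock is granted, a non-EAGAIN error occurs, or a signal is received.
 */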
7243 static int
7244 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7245 struct file_lock *request)
7246 {
7247 int status = -ERESTARTSYS;
7248 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7249
7250 while (!signalled()) {
7251 status = nfs4_proc_setlk(state, cmd, request);
7252 if ((status != -EAGAIN) || IS_SETLK(cmd))
7253 break;
7254 freezable_schedule_timeout_interruptible(timeout);
7255 timeout *= 2;
7256 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7257 status = -ERESTARTSYS;
7258 }
7259 return status;
7260 }
7261
7262 #ifdef CONFIG_NFS_V4_1
7263 struct nfs4_lock_waiter {
7264 struct task_struct *task;
7265 struct inode *inode;
7266 struct nfs_lowner *owner;
7267 };
7268
7269 static int
7270 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7271 {
7272 int ret;
7273 struct nfs4_lock_waiter *waiter = wait->private;
7274
7275 /* NULL key means to wake up everyone */
7276 if (key) {
7277 struct cb_notify_lock_args *cbnl = key;
7278 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7279 *wowner = waiter->owner;
7280
7281 /* Only wake if the callback was for the same owner. */
7282 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7283 return 0;
7284
7285 /* Make sure it's for the right inode */
7286 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7287 return 0;
7288 }
7289
7290 /* override "private" so we can use default_wake_function */
7291 wait->private = waiter->task;
7292 ret = woken_wake_function(wait, mode, flags, key);
7293 if (ret)
7294 list_del_init(&wait->entry);
7295 wait->private = waiter;
7296 return ret;
7297 }
7298
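/*
 * As above, but when the server may send CB_NOTIFY_LOCK we park on the
 * client's cl_lock_waitq instead of blindly polling; nfs4_wake_lock_waiter()
 * only wakes us when the callback matches our lock owner and filehandle.
 */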
7299 static int
7300 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7301 {
7302 int status = -ERESTARTSYS;
7303 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7304 struct nfs_server *server = NFS_SERVER(state->inode);
7305 struct nfs_client *clp = server->nfs_client;
7306 wait_queue_head_t *q = &clp->cl_lock_waitq;
7307 struct nfs_lowner owner = { .clientid = clp->cl_clientid,
7308 .id = lsp->ls_seqid.owner_id,
7309 .s_dev = server->s_dev };
7310 struct nfs4_lock_waiter waiter = { .task = current,
7311 .inode = state->inode,
7312 .owner = &owner};
7313 wait_queue_entry_t wait;
7314
7315 /* Don't bother with waitqueue if we don't expect a callback */
7316 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7317 return nfs4_retry_setlk_simple(state, cmd, request);
7318
7319 init_wait(&wait);
7320 wait.private = &waiter;
7321 wait.func = nfs4_wake_lock_waiter;
7322
7323 while (!signalled()) {
7324 add_wait_queue(q, &wait);
7325 status = nfs4_proc_setlk(state, cmd, request);
7326 if ((status != -EAGAIN) || IS_SETLK(cmd)) {
7327 finish_wait(q, &wait);
7328 break;
7329 }
7330
7331 status = -ERESTARTSYS;
7332 freezer_do_not_count();
7333 wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
7334 freezer_count();
7335 finish_wait(q, &wait);
7336 }
7337
7338 return status;
7339 }
7340 #else /* !CONFIG_NFS_V4_1 */
7341 static inline int
7342 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7343 {
7344 return nfs4_retry_setlk_simple(state, cmd, request);
7345 }
7346 #endif
7347
7348 static int
7349 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7350 {
7351 struct nfs_open_context *ctx;
7352 struct nfs4_state *state;
7353 int status;
7354
7355 /* verify open state */
7356 ctx = nfs_file_open_context(filp);
7357 state = ctx->state;
7358
7359 if (IS_GETLK(cmd)) {
7360 if (state != NULL)
7361 return nfs4_proc_getlk(state, F_GETLK, request);
7362 return 0;
7363 }
7364
7365 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7366 return -EINVAL;
7367
7368 if (request->fl_type == F_UNLCK) {
7369 if (state != NULL)
7370 return nfs4_proc_unlck(state, cmd, request);
7371 return 0;
7372 }
7373
7374 if (state == NULL)
7375 return -ENOLCK;
7376
7377 if ((request->fl_flags & FL_POSIX) &&
7378 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7379 return -ENOLCK;
7380
7381 /*
7382 * Don't rely on the VFS having checked the file open mode,
7383 * since it won't do this for flock() locks.
7384 */
7385 switch (request->fl_type) {
7386 case F_RDLCK:
7387 if (!(filp->f_mode & FMODE_READ))
7388 return -EBADF;
7389 break;
7390 case F_WRLCK:
7391 if (!(filp->f_mode & FMODE_WRITE))
7392 return -EBADF;
7393 }
7394
7395 status = nfs4_set_lock_state(state, request);
7396 if (status != 0)
7397 return status;
7398
7399 return nfs4_retry_setlk(state, cmd, request);
7400 }
7401
7402 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7403 {
7404 struct nfs_server *server = NFS_SERVER(state->inode);
7405 int err;
7406
7407 err = nfs4_set_lock_state(state, fl);
7408 if (err != 0)
7409 return err;
7410 do {
7411 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7412 if (err != -NFS4ERR_DELAY)
7413 break;
7414 ssleep(1);
7415 } while (err == -NFS4ERR_DELAY);
7416 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7417 }
7418
7419 struct nfs_release_lockowner_data {
7420 struct nfs4_lock_state *lsp;
7421 struct nfs_server *server;
7422 struct nfs_release_lockowner_args args;
7423 struct nfs_release_lockowner_res res;
7424 unsigned long timestamp;
7425 };
7426
7427 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7428 {
7429 struct nfs_release_lockowner_data *data = calldata;
7430 struct nfs_server *server = data->server;
7431 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7432 &data->res.seq_res, task);
7433 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7434 data->timestamp = jiffies;
7435 }
7436
7437 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7438 {
7439 struct nfs_release_lockowner_data *data = calldata;
7440 struct nfs_server *server = data->server;
7441
7442 nfs40_sequence_done(task, &data->res.seq_res);
7443
7444 switch (task->tk_status) {
7445 case 0:
7446 renew_lease(server, data->timestamp);
7447 break;
7448 case -NFS4ERR_STALE_CLIENTID:
7449 case -NFS4ERR_EXPIRED:
7450 nfs4_schedule_lease_recovery(server->nfs_client);
7451 break;
7452 case -NFS4ERR_LEASE_MOVED:
7453 case -NFS4ERR_DELAY:
7454 if (nfs4_async_handle_error(task, server,
7455 NULL, NULL) == -EAGAIN)
7456 rpc_restart_call_prepare(task);
7457 }
7458 }
7459
7460 static void nfs4_release_lockowner_release(void *calldata)
7461 {
7462 struct nfs_release_lockowner_data *data = calldata;
7463 nfs4_free_lock_state(data->server, data->lsp);
7464 kfree(calldata);
7465 }
7466
7467 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7468 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7469 .rpc_call_done = nfs4_release_lockowner_done,
7470 .rpc_release = nfs4_release_lockowner_release,
7471 };
7472
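/*
 * RELEASE_LOCKOWNER exists only in NFSv4.0, so this helper returns early
 * for any other minor version. The call is sent asynchronously; the lock
 * state and call data are freed in the rpc_release callback above.
 */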
7473 static void
7474 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7475 {
7476 struct nfs_release_lockowner_data *data;
7477 struct rpc_message msg = {
7478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7479 };
7480
7481 if (server->nfs_client->cl_mvops->minor_version != 0)
7482 return;
7483
7484 data = kmalloc(sizeof(*data), GFP_NOFS);
7485 if (!data)
7486 return;
7487 data->lsp = lsp;
7488 data->server = server;
7489 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7490 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7491 data->args.lock_owner.s_dev = server->s_dev;
7492
7493 msg.rpc_argp = &data->args;
7494 msg.rpc_resp = &data->res;
7495 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7496 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7497 }
7498
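/*
 * Expose the NFSv4 ACL as the "system.nfs4_acl" extended attribute by
 * forwarding xattr get/set requests to the NFSv4 ACL procedures.
 */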
7499 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7500
7501 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7502 struct dentry *unused, struct inode *inode,
7503 const char *key, const void *buf,
7504 size_t buflen, int flags)
7505 {
7506 return nfs4_proc_set_acl(inode, buf, buflen);
7507 }
7508
7509 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7510 struct dentry *unused, struct inode *inode,
7511 const char *key, void *buf, size_t buflen)
7512 {
7513 return nfs4_proc_get_acl(inode, buf, buflen);
7514 }
7515
7516 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7517 {
7518 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
7519 }
7520
7521 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7522
7523 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7524 struct dentry *unused, struct inode *inode,
7525 const char *key, const void *buf,
7526 size_t buflen, int flags)
7527 {
7528 if (security_ismaclabel(key))
7529 return nfs4_set_security_label(inode, buf, buflen);
7530
7531 return -EOPNOTSUPP;
7532 }
7533
7534 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7535 struct dentry *unused, struct inode *inode,
7536 const char *key, void *buf, size_t buflen)
7537 {
7538 if (security_ismaclabel(key))
7539 return nfs4_get_security_label(inode, buf, buflen);
7540 return -EOPNOTSUPP;
7541 }
7542
7543 static ssize_t
7544 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7545 {
7546 int len = 0;
7547
7548 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7549 len = security_inode_listsecurity(inode, list, list_len);
7550 if (len >= 0 && list_len && len > list_len)
7551 return -ERANGE;
7552 }
7553 return len;
7554 }
7555
7556 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7557 .prefix = XATTR_SECURITY_PREFIX,
7558 .get = nfs4_xattr_get_nfs4_label,
7559 .set = nfs4_xattr_set_nfs4_label,
7560 };
7561
7562 #else
7563
7564 static ssize_t
7565 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7566 {
7567 return 0;
7568 }
7569
7570 #endif
7571
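/*
 * NFSv4.2 "user." extended attributes (RFC 8276): the handlers below
 * consult the local xattr and access caches first, and otherwise fall
 * back to the SETXATTR, GETXATTR, REMOVEXATTR and LISTXATTRS operations.
 */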
7572 #ifdef CONFIG_NFS_V4_2
7573 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
7574 struct dentry *unused, struct inode *inode,
7575 const char *key, const void *buf,
7576 size_t buflen, int flags)
7577 {
7578 struct nfs_access_entry cache;
7579 int ret;
7580
7581 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7582 return -EOPNOTSUPP;
7583
7584 /*
7585 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7586 * flags right now. Handling of xattr operations uses the normal
7587 * file read/write permissions.
7588 *
7589 * Just in case the server has other ideas (which RFC 8276 allows),
7590 * do a cached access check for the XA* flags to possibly avoid
7591 * doing an RPC and getting EACCES back.
7592 */
7593 if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
7594 if (!(cache.mask & NFS_ACCESS_XAWRITE))
7595 return -EACCES;
7596 }
7597
7598 if (buf == NULL) {
7599 ret = nfs42_proc_removexattr(inode, key);
7600 if (!ret)
7601 nfs4_xattr_cache_remove(inode, key);
7602 } else {
7603 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
7604 if (!ret)
7605 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
7606 }
7607
7608 return ret;
7609 }
7610
7611 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
7612 struct dentry *unused, struct inode *inode,
7613 const char *key, void *buf, size_t buflen)
7614 {
7615 struct nfs_access_entry cache;
7616 ssize_t ret;
7617
7618 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7619 return -EOPNOTSUPP;
7620
7621 if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
7622 if (!(cache.mask & NFS_ACCESS_XAREAD))
7623 return -EACCES;
7624 }
7625
7626 ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
7627 if (ret)
7628 return ret;
7629
7630 ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
7631 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7632 return ret;
7633
7634 ret = nfs42_proc_getxattr(inode, key, buf, buflen);
7635
7636 return ret;
7637 }
7638
7639 static ssize_t
7640 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7641 {
7642 u64 cookie;
7643 bool eof;
7644 ssize_t ret, size;
7645 char *buf;
7646 size_t buflen;
7647 struct nfs_access_entry cache;
7648
7649 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7650 return 0;
7651
7652 if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
7653 if (!(cache.mask & NFS_ACCESS_XALIST))
7654 return 0;
7655 }
7656
7657 ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
7658 if (ret)
7659 return ret;
7660
7661 ret = nfs4_xattr_cache_list(inode, list, list_len);
7662 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7663 return ret;
7664
7665 cookie = 0;
7666 eof = false;
7667 buflen = list_len ? list_len : XATTR_LIST_MAX;
7668 buf = list_len ? list : NULL;
7669 size = 0;
7670
7671 while (!eof) {
7672 ret = nfs42_proc_listxattrs(inode, buf, buflen,
7673 &cookie, &eof);
7674 if (ret < 0)
7675 return ret;
7676
7677 if (list_len) {
7678 buf += ret;
7679 buflen -= ret;
7680 }
7681 size += ret;
7682 }
7683
7684 if (list_len)
7685 nfs4_xattr_cache_set_list(inode, list, size);
7686
7687 return size;
7688 }
7689
7690 #else
7691
7692 static ssize_t
7693 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7694 {
7695 return 0;
7696 }
7697 #endif /* CONFIG_NFS_V4_2 */
7698
7699 /*
7700 * nfs_fhget will use either the mounted_on_fileid or the fileid
7701 */
7702 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7703 {
7704 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7705 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7706 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7707 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7708 return;
7709
7710 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7711 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7712 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7713 fattr->nlink = 2;
7714 }
7715
7716 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7717 const struct qstr *name,
7718 struct nfs4_fs_locations *fs_locations,
7719 struct page *page)
7720 {
7721 struct nfs_server *server = NFS_SERVER(dir);
7722 u32 bitmask[3];
7723 struct nfs4_fs_locations_arg args = {
7724 .dir_fh = NFS_FH(dir),
7725 .name = name,
7726 .page = page,
7727 .bitmask = bitmask,
7728 };
7729 struct nfs4_fs_locations_res res = {
7730 .fs_locations = fs_locations,
7731 };
7732 struct rpc_message msg = {
7733 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7734 .rpc_argp = &args,
7735 .rpc_resp = &res,
7736 };
7737 int status;
7738
7739 dprintk("%s: start\n", __func__);
7740
7741 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7742 bitmask[1] = nfs4_fattr_bitmap[1];
7743
7744 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
7745 * is not supported */
7746 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7747 bitmask[0] &= ~FATTR4_WORD0_FILEID;
7748 else
7749 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7750
7751 nfs_fattr_init(&fs_locations->fattr);
7752 fs_locations->server = server;
7753 fs_locations->nlocations = 0;
7754 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7755 dprintk("%s: returned status = %d\n", __func__, status);
7756 return status;
7757 }
7758
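/*
 * nfs4_proc_fs_locations() wraps _nfs4_proc_fs_locations() in the usual
 * nfs4_handle_exception() retry loop so transient errors are retried
 * before the result is returned to the caller.
 */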
7759 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7760 const struct qstr *name,
7761 struct nfs4_fs_locations *fs_locations,
7762 struct page *page)
7763 {
7764 struct nfs4_exception exception = {
7765 .interruptible = true,
7766 };
7767 int err;
7768 do {
7769 err = _nfs4_proc_fs_locations(client, dir, name,
7770 fs_locations, page);
7771 trace_nfs4_get_fs_locations(dir, name, err);
7772 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7773 &exception);
7774 } while (exception.retry);
7775 return err;
7776 }
7777
7778 /*
7779 * This operation also signals the server that this client is
7780 * performing migration recovery. The server can stop returning
7781 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
7782 * appended to this compound to identify the client ID which is
7783 * performing recovery.
7784 */
7785 static int _nfs40_proc_get_locations(struct inode *inode,
7786 struct nfs4_fs_locations *locations,
7787 struct page *page, const struct cred *cred)
7788 {
7789 struct nfs_server *server = NFS_SERVER(inode);
7790 struct rpc_clnt *clnt = server->client;
7791 u32 bitmask[2] = {
7792 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7793 };
7794 struct nfs4_fs_locations_arg args = {
7795 .clientid = server->nfs_client->cl_clientid,
7796 .fh = NFS_FH(inode),
7797 .page = page,
7798 .bitmask = bitmask,
7799 .migration = 1, /* skip LOOKUP */
7800 .renew = 1, /* append RENEW */
7801 };
7802 struct nfs4_fs_locations_res res = {
7803 .fs_locations = locations,
7804 .migration = 1,
7805 .renew = 1,
7806 };
7807 struct rpc_message msg = {
7808 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7809 .rpc_argp = &args,
7810 .rpc_resp = &res,
7811 .rpc_cred = cred,
7812 };
7813 unsigned long now = jiffies;
7814 int status;
7815
7816 nfs_fattr_init(&locations->fattr);
7817 locations->server = server;
7818 locations->nlocations = 0;
7819
7820 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7821 status = nfs4_call_sync_sequence(clnt, server, &msg,
7822 &args.seq_args, &res.seq_res);
7823 if (status)
7824 return status;
7825
7826 renew_lease(server, now);
7827 return 0;
7828 }
7829
7830 #ifdef CONFIG_NFS_V4_1
7831
7832 /*
7833 * This operation also signals the server that this client is
7834 * performing migration recovery. The server can stop asserting
7835 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
7836 * performing this operation is identified in the SEQUENCE
7837 * operation in this compound.
7838 *
7839 * When the client supports GETATTR(fs_locations_info), it can
7840 * be plumbed in here.
7841 */
7842 static int _nfs41_proc_get_locations(struct inode *inode,
7843 struct nfs4_fs_locations *locations,
7844 struct page *page, const struct cred *cred)
7845 {
7846 struct nfs_server *server = NFS_SERVER(inode);
7847 struct rpc_clnt *clnt = server->client;
7848 u32 bitmask[2] = {
7849 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7850 };
7851 struct nfs4_fs_locations_arg args = {
7852 .fh = NFS_FH(inode),
7853 .page = page,
7854 .bitmask = bitmask,
7855 .migration = 1, /* skip LOOKUP */
7856 };
7857 struct nfs4_fs_locations_res res = {
7858 .fs_locations = locations,
7859 .migration = 1,
7860 };
7861 struct rpc_message msg = {
7862 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7863 .rpc_argp = &args,
7864 .rpc_resp = &res,
7865 .rpc_cred = cred,
7866 };
7867 int status;
7868
7869 nfs_fattr_init(&locations->fattr);
7870 locations->server = server;
7871 locations->nlocations = 0;
7872
7873 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7874 status = nfs4_call_sync_sequence(clnt, server, &msg,
7875 &args.seq_args, &res.seq_res);
7876 if (status == NFS4_OK &&
7877 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7878 status = -NFS4ERR_LEASE_MOVED;
7879 return status;
7880 }
7881
7882 #endif /* CONFIG_NFS_V4_1 */
7883
7884 /**
7885 * nfs4_proc_get_locations - discover locations for a migrated FSID
7886 * @inode: inode on FSID that is migrating
7887 * @locations: result of query
7888 * @page: buffer
7889 * @cred: credential to use for this operation
7890 *
7891 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
7892 * operation failed, or a negative errno if a local error occurred.
7893 *
7894 * On success, "locations" is filled in, but if the server has
7895 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
7896 * asserted.
7897 *
7898 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
7899 * from this client that require migration recovery.
7900 */
7901 int nfs4_proc_get_locations(struct inode *inode,
7902 struct nfs4_fs_locations *locations,
7903 struct page *page, const struct cred *cred)
7904 {
7905 struct nfs_server *server = NFS_SERVER(inode);
7906 struct nfs_client *clp = server->nfs_client;
7907 const struct nfs4_mig_recovery_ops *ops =
7908 clp->cl_mvops->mig_recovery_ops;
7909 struct nfs4_exception exception = {
7910 .interruptible = true,
7911 };
7912 int status;
7913
7914 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7915 (unsigned long long)server->fsid.major,
7916 (unsigned long long)server->fsid.minor,
7917 clp->cl_hostname);
7918 nfs_display_fhandle(NFS_FH(inode), __func__);
7919
7920 do {
7921 status = ops->get_locations(inode, locations, page, cred);
7922 if (status != -NFS4ERR_DELAY)
7923 break;
7924 nfs4_handle_exception(server, status, &exception);
7925 } while (exception.retry);
7926 return status;
7927 }
7928
7929 /*
7930 * This operation also signals the server that this client is
7931 * performing "lease moved" recovery. The server can stop
7932 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
7933 * is appended to this compound to identify the client ID which is
7934 * performing recovery.
7935 */
7936 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
7937 {
7938 struct nfs_server *server = NFS_SERVER(inode);
7939 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
7940 struct rpc_clnt *clnt = server->client;
7941 struct nfs4_fsid_present_arg args = {
7942 .fh = NFS_FH(inode),
7943 .clientid = clp->cl_clientid,
7944 .renew = 1, /* append RENEW */
7945 };
7946 struct nfs4_fsid_present_res res = {
7947 .renew = 1,
7948 };
7949 struct rpc_message msg = {
7950 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7951 .rpc_argp = &args,
7952 .rpc_resp = &res,
7953 .rpc_cred = cred,
7954 };
7955 unsigned long now = jiffies;
7956 int status;
7957
7958 res.fh = nfs_alloc_fhandle();
7959 if (res.fh == NULL)
7960 return -ENOMEM;
7961
7962 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7963 status = nfs4_call_sync_sequence(clnt, server, &msg,
7964 &args.seq_args, &res.seq_res);
7965 nfs_free_fhandle(res.fh);
7966 if (status)
7967 return status;
7968
7969 do_renew_lease(clp, now);
7970 return 0;
7971 }
7972
7973 #ifdef CONFIG_NFS_V4_1
7974
7975 /*
7976 * This operation also signals the server that this client is
7977 * performing "lease moved" recovery. The server can stop asserting
7978 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
7979 * this operation is identified in the SEQUENCE operation in this
7980 * compound.
7981 */
7982 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
7983 {
7984 struct nfs_server *server = NFS_SERVER(inode);
7985 struct rpc_clnt *clnt = server->client;
7986 struct nfs4_fsid_present_arg args = {
7987 .fh = NFS_FH(inode),
7988 };
7989 struct nfs4_fsid_present_res res = {
7990 };
7991 struct rpc_message msg = {
7992 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7993 .rpc_argp = &args,
7994 .rpc_resp = &res,
7995 .rpc_cred = cred,
7996 };
7997 int status;
7998
7999 res.fh = nfs_alloc_fhandle();
8000 if (res.fh == NULL)
8001 return -ENOMEM;
8002
8003 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8004 status = nfs4_call_sync_sequence(clnt, server, &msg,
8005 &args.seq_args, &res.seq_res);
8006 nfs_free_fhandle(res.fh);
8007 if (status == NFS4_OK &&
8008 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8009 status = -NFS4ERR_LEASE_MOVED;
8010 return status;
8011 }
8012
8013 #endif /* CONFIG_NFS_V4_1 */
8014
8015 /**
8016 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8017 * @inode: inode on FSID to check
8018 * @cred: credential to use for this operation
8019 *
8020 * Server indicates whether the FSID is present, moved, or not
8021 * recognized. This operation is necessary to clear a LEASE_MOVED
8022 * condition for this client ID.
8023 *
8024 * Returns NFS4_OK if the FSID is present on this server,
8025 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8026 * NFS4ERR code if some error occurred on the server, or a
8027 * negative errno if a local failure occurred.
8028 */
8029 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8030 {
8031 struct nfs_server *server = NFS_SERVER(inode);
8032 struct nfs_client *clp = server->nfs_client;
8033 const struct nfs4_mig_recovery_ops *ops =
8034 clp->cl_mvops->mig_recovery_ops;
8035 struct nfs4_exception exception = {
8036 .interruptible = true,
8037 };
8038 int status;
8039
8040 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8041 (unsigned long long)server->fsid.major,
8042 (unsigned long long)server->fsid.minor,
8043 clp->cl_hostname);
8044 nfs_display_fhandle(NFS_FH(inode), __func__);
8045
8046 do {
8047 status = ops->fsid_present(inode, cred);
8048 if (status != -NFS4ERR_DELAY)
8049 break;
8050 nfs4_handle_exception(server, status, &exception);
8051 } while (exception.retry);
8052 return status;
8053 }
8054
8055 /*
8056 * If 'use_integrity' is true and the state management nfs_client
8057 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8058 * and the machine credential as per RFC3530bis and RFC5661 Security
8059 * Considerations sections. Otherwise, just use the user cred with the
8060 * filesystem's rpc_client.
8061 */
8062 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8063 {
8064 int status;
8065 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8066 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8067 struct nfs4_secinfo_arg args = {
8068 .dir_fh = NFS_FH(dir),
8069 .name = name,
8070 };
8071 struct nfs4_secinfo_res res = {
8072 .flavors = flavors,
8073 };
8074 struct rpc_message msg = {
8075 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8076 .rpc_argp = &args,
8077 .rpc_resp = &res,
8078 };
8079 struct nfs4_call_sync_data data = {
8080 .seq_server = NFS_SERVER(dir),
8081 .seq_args = &args.seq_args,
8082 .seq_res = &res.seq_res,
8083 };
8084 struct rpc_task_setup task_setup = {
8085 .rpc_client = clnt,
8086 .rpc_message = &msg,
8087 .callback_ops = clp->cl_mvops->call_sync_ops,
8088 .callback_data = &data,
8089 .flags = RPC_TASK_NO_ROUND_ROBIN,
8090 };
8091 const struct cred *cred = NULL;
8092
8093 if (use_integrity) {
8094 clnt = clp->cl_rpcclient;
8095 task_setup.rpc_client = clnt;
8096
8097 cred = nfs4_get_clid_cred(clp);
8098 msg.rpc_cred = cred;
8099 }
8100
8101 dprintk("NFS call secinfo %s\n", name->name);
8102
8103 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8104 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8105 status = nfs4_call_sync_custom(&task_setup);
8106
8107 dprintk("NFS reply secinfo: %d\n", status);
8108
8109 put_cred(cred);
8110 return status;
8111 }
8112
8113 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8114 struct nfs4_secinfo_flavors *flavors)
8115 {
8116 struct nfs4_exception exception = {
8117 .interruptible = true,
8118 };
8119 int err;
8120 do {
8121 err = -NFS4ERR_WRONGSEC;
8122
8123 /* try to use integrity protection with machine cred */
8124 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8125 err = _nfs4_proc_secinfo(dir, name, flavors, true);
8126
8127 /*
8128 * If unable to use integrity protection, or if SECINFO with
8129 * integrity protection returns NFS4ERR_WRONGSEC (which is
8130 * disallowed by the spec, but exists in deployed servers), use
8131 * the current filesystem's rpc_client and the user cred.
8132 */
8133 if (err == -NFS4ERR_WRONGSEC)
8134 err = _nfs4_proc_secinfo(dir, name, flavors, false);
8135
8136 trace_nfs4_secinfo(dir, name, err);
8137 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8138 &exception);
8139 } while (exception.retry);
8140 return err;
8141 }
8142
8143 #ifdef CONFIG_NFS_V4_1
8144 /*
8145 * Check the exchange flags returned by the server for invalid flags:
8146 * flags outside the allowed mask, both the PNFS and NON_PNFS flags set,
8147 * or none of the NON_PNFS, PNFS, or DS flags set.
8148 */
8149 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8150 {
8151 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8152 goto out_inval;
8153 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8154 goto out_inval;
8155 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8156 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8157 goto out_inval;
8158 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8159 goto out_inval;
8160 return NFS_OK;
8161 out_inval:
8162 return -NFS4ERR_INVAL;
8163 }
8164
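/*
 * Two server scopes compare equal only when they have the same length
 * and identical contents; a mismatch across EXCHANGE_ID replies marks
 * the client with NFS4CLNT_SERVER_SCOPE_MISMATCH.
 */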
8165 static bool
8166 nfs41_same_server_scope(struct nfs41_server_scope *a,
8167 struct nfs41_server_scope *b)
8168 {
8169 if (a->server_scope_sz != b->server_scope_sz)
8170 return false;
8171 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8172 }
8173
8174 static void
8175 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8176 {
8177 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8178 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8179 struct nfs_client *clp = args->client;
8180
8181 switch (task->tk_status) {
8182 case -NFS4ERR_BADSESSION:
8183 case -NFS4ERR_DEADSESSION:
8184 nfs4_schedule_session_recovery(clp->cl_session,
8185 task->tk_status);
8186 }
8187 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8188 res->dir != NFS4_CDFS4_BOTH) {
8189 rpc_task_close_connection(task);
8190 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8191 rpc_restart_call(task);
8192 }
8193 }
8194
8195 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8196 .rpc_call_done = nfs4_bind_one_conn_to_session_done,
8197 };
8198
8199 /*
8200 * nfs4_proc_bind_one_conn_to_session()
8201 *
8202 * The 4.1 client currently uses the same TCP connection for the
8203 * fore and backchannel.
8204 */
8205 static
8206 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8207 struct rpc_xprt *xprt,
8208 struct nfs_client *clp,
8209 const struct cred *cred)
8210 {
8211 int status;
8212 struct nfs41_bind_conn_to_session_args args = {
8213 .client = clp,
8214 .dir = NFS4_CDFC4_FORE_OR_BOTH,
8215 .retries = 0,
8216 };
8217 struct nfs41_bind_conn_to_session_res res;
8218 struct rpc_message msg = {
8219 .rpc_proc =
8220 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8221 .rpc_argp = &args,
8222 .rpc_resp = &res,
8223 .rpc_cred = cred,
8224 };
8225 struct rpc_task_setup task_setup_data = {
8226 .rpc_client = clnt,
8227 .rpc_xprt = xprt,
8228 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
8229 .rpc_message = &msg,
8230 .flags = RPC_TASK_TIMEOUT,
8231 };
8232 struct rpc_task *task;
8233
8234 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8235 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8236 args.dir = NFS4_CDFC4_FORE;
8237
8238 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
8239 if (xprt != rcu_access_pointer(clnt->cl_xprt))
8240 args.dir = NFS4_CDFC4_FORE;
8241
8242 task = rpc_run_task(&task_setup_data);
8243 if (!IS_ERR(task)) {
8244 status = task->tk_status;
8245 rpc_put_task(task);
8246 } else
8247 status = PTR_ERR(task);
8248 trace_nfs4_bind_conn_to_session(clp, status);
8249 if (status == 0) {
8250 if (memcmp(res.sessionid.data,
8251 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8252 dprintk("NFS: %s: Session ID mismatch\n", __func__);
8253 return -EIO;
8254 }
8255 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8256 dprintk("NFS: %s: Unexpected direction from server\n",
8257 __func__);
8258 return -EIO;
8259 }
8260 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8261 dprintk("NFS: %s: Server returned RDMA mode = true\n",
8262 __func__);
8263 return -EIO;
8264 }
8265 }
8266
8267 return status;
8268 }
8269
8270 struct rpc_bind_conn_calldata {
8271 struct nfs_client *clp;
8272 const struct cred *cred;
8273 };
8274
8275 static int
8276 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8277 struct rpc_xprt *xprt,
8278 void *calldata)
8279 {
8280 struct rpc_bind_conn_calldata *p = calldata;
8281
8282 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8283 }
8284
8285 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8286 {
8287 struct rpc_bind_conn_calldata data = {
8288 .clp = clp,
8289 .cred = cred,
8290 };
8291 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8292 nfs4_proc_bind_conn_to_session_callback, &data);
8293 }
8294
8295 /*
8296 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
8297 * plus the operations we'd like to see in the allow map to enable certain features
8298 */
8299 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8300 .how = SP4_MACH_CRED,
8301 .enforce.u.words = {
8302 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8303 1 << (OP_EXCHANGE_ID - 32) |
8304 1 << (OP_CREATE_SESSION - 32) |
8305 1 << (OP_DESTROY_SESSION - 32) |
8306 1 << (OP_DESTROY_CLIENTID - 32)
8307 },
8308 .allow.u.words = {
8309 [0] = 1 << (OP_CLOSE) |
8310 1 << (OP_OPEN_DOWNGRADE) |
8311 1 << (OP_LOCKU) |
8312 1 << (OP_DELEGRETURN) |
8313 1 << (OP_COMMIT),
8314 [1] = 1 << (OP_SECINFO - 32) |
8315 1 << (OP_SECINFO_NO_NAME - 32) |
8316 1 << (OP_LAYOUTRETURN - 32) |
8317 1 << (OP_TEST_STATEID - 32) |
8318 1 << (OP_FREE_STATEID - 32) |
8319 1 << (OP_WRITE - 32)
8320 }
8321 };
8322
8323 /*
8324 * Select the state protection mode for client `clp' given the server results
8325 * from exchange_id in `sp'.
8326 *
8327 * Returns 0 on success, negative errno otherwise.
8328 */
8329 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8330 struct nfs41_state_protection *sp)
8331 {
8332 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8333 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8334 1 << (OP_EXCHANGE_ID - 32) |
8335 1 << (OP_CREATE_SESSION - 32) |
8336 1 << (OP_DESTROY_SESSION - 32) |
8337 1 << (OP_DESTROY_CLIENTID - 32)
8338 };
8339 unsigned long flags = 0;
8340 unsigned int i;
8341 int ret = 0;
8342
8343 if (sp->how == SP4_MACH_CRED) {
8344 /* Print state protect result */
8345 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8346 for (i = 0; i <= LAST_NFS4_OP; i++) {
8347 if (test_bit(i, sp->enforce.u.longs))
8348 dfprintk(MOUNT, " enforce op %d\n", i);
8349 if (test_bit(i, sp->allow.u.longs))
8350 dfprintk(MOUNT, " allow op %d\n", i);
8351 }
8352
8353 /* make sure nothing is on enforce list that isn't supported */
8354 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8355 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8356 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8357 ret = -EINVAL;
8358 goto out;
8359 }
8360 }
8361
8362 /*
8363 * Minimal mode - state operations are allowed to use machine
8364 * credential. Note this already happens by default, so the
8365 * client doesn't have to do anything more than the negotiation.
8366 *
8367 * NOTE: we don't care if EXCHANGE_ID is in the list -
8368 * we're already using the machine cred for exchange_id
8369 * and will never use a different cred.
8370 */
8371 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8372 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8373 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8374 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8375 dfprintk(MOUNT, "sp4_mach_cred:\n");
8376 dfprintk(MOUNT, " minimal mode enabled\n");
8377 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8378 } else {
8379 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8380 ret = -EINVAL;
8381 goto out;
8382 }
8383
8384 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8385 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8386 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8387 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8388 dfprintk(MOUNT, " cleanup mode enabled\n");
8389 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8390 }
8391
8392 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8393 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8394 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8395 }
8396
8397 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8398 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8399 dfprintk(MOUNT, " secinfo mode enabled\n");
8400 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8401 }
8402
8403 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8404 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8405 dfprintk(MOUNT, " stateid mode enabled\n");
8406 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8407 }
8408
8409 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8410 dfprintk(MOUNT, " write mode enabled\n");
8411 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8412 }
8413
8414 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8415 dfprintk(MOUNT, " commit mode enabled\n");
8416 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8417 }
8418 }
8419 out:
8420 clp->cl_sp4_flags = flags;
8421 return ret;
8422 }
8423
8424 struct nfs41_exchange_id_data {
8425 struct nfs41_exchange_id_res res;
8426 struct nfs41_exchange_id_args args;
8427 };
8428
8429 static void nfs4_exchange_id_release(void *data)
8430 {
8431 struct nfs41_exchange_id_data *cdata =
8432 (struct nfs41_exchange_id_data *)data;
8433
8434 nfs_put_client(cdata->args.client);
8435 kfree(cdata->res.impl_id);
8436 kfree(cdata->res.server_scope);
8437 kfree(cdata->res.server_owner);
8438 kfree(cdata);
8439 }
8440
8441 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8442 .rpc_release = nfs4_exchange_id_release,
8443 };
8444
8445 /*
8446 * nfs4_run_exchange_id()
8447 *
8448 * Set up and launch an asynchronous EXCHANGE_ID RPC task.
8449 */
8450 static struct rpc_task *
8451 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8452 u32 sp4_how, struct rpc_xprt *xprt)
8453 {
8454 struct rpc_message msg = {
8455 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8456 .rpc_cred = cred,
8457 };
8458 struct rpc_task_setup task_setup_data = {
8459 .rpc_client = clp->cl_rpcclient,
8460 .callback_ops = &nfs4_exchange_id_call_ops,
8461 .rpc_message = &msg,
8462 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8463 };
8464 struct nfs41_exchange_id_data *calldata;
8465 int status;
8466
8467 if (!refcount_inc_not_zero(&clp->cl_count))
8468 return ERR_PTR(-EIO);
8469
8470 status = -ENOMEM;
8471 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8472 if (!calldata)
8473 goto out;
8474
8475 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8476
8477 status = nfs4_init_uniform_client_string(clp);
8478 if (status)
8479 goto out_calldata;
8480
8481 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8482 GFP_NOFS);
8483 status = -ENOMEM;
8484 if (unlikely(calldata->res.server_owner == NULL))
8485 goto out_calldata;
8486
8487 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8488 GFP_NOFS);
8489 if (unlikely(calldata->res.server_scope == NULL))
8490 goto out_server_owner;
8491
8492 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8493 if (unlikely(calldata->res.impl_id == NULL))
8494 goto out_server_scope;
8495
8496 switch (sp4_how) {
8497 case SP4_NONE:
8498 calldata->args.state_protect.how = SP4_NONE;
8499 break;
8500
8501 case SP4_MACH_CRED:
8502 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8503 break;
8504
8505 default:
8506 /* unsupported! */
8507 WARN_ON_ONCE(1);
8508 status = -EINVAL;
8509 goto out_impl_id;
8510 }
8511 if (xprt) {
8512 task_setup_data.rpc_xprt = xprt;
8513 task_setup_data.flags |= RPC_TASK_SOFTCONN;
8514 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8515 sizeof(calldata->args.verifier.data));
8516 }
8517 calldata->args.client = clp;
8518 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8519 EXCHGID4_FLAG_BIND_PRINC_STATEID;
8520 #ifdef CONFIG_NFS_V4_1_MIGRATION
8521 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8522 #endif
8523 msg.rpc_argp = &calldata->args;
8524 msg.rpc_resp = &calldata->res;
8525 task_setup_data.callback_data = calldata;
8526
8527 return rpc_run_task(&task_setup_data);
8528
8529 out_impl_id:
8530 kfree(calldata->res.impl_id);
8531 out_server_scope:
8532 kfree(calldata->res.server_scope);
8533 out_server_owner:
8534 kfree(calldata->res.server_owner);
8535 out_calldata:
8536 kfree(calldata);
8537 out:
8538 nfs_put_client(clp);
8539 return ERR_PTR(status);
8540 }
8541
8542 /*
8543 * _nfs4_proc_exchange_id()
8544 *
8545 * Wrapper for EXCHANGE_ID operation.
8546 */
8547 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8548 u32 sp4_how)
8549 {
8550 struct rpc_task *task;
8551 struct nfs41_exchange_id_args *argp;
8552 struct nfs41_exchange_id_res *resp;
8553 unsigned long now = jiffies;
8554 int status;
8555
8556 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8557 if (IS_ERR(task))
8558 return PTR_ERR(task);
8559
8560 argp = task->tk_msg.rpc_argp;
8561 resp = task->tk_msg.rpc_resp;
8562 status = task->tk_status;
8563 if (status != 0)
8564 goto out;
8565
8566 status = nfs4_check_cl_exchange_flags(resp->flags,
8567 clp->cl_mvops->minor_version);
8568 if (status != 0)
8569 goto out;
8570
8571 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8572 if (status != 0)
8573 goto out;
8574
8575 do_renew_lease(clp, now);
8576
8577 clp->cl_clientid = resp->clientid;
8578 clp->cl_exchange_flags = resp->flags;
8579 clp->cl_seqid = resp->seqid;
8580 /* Client ID is not confirmed */
8581 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8582 clear_bit(NFS4_SESSION_ESTABLISHED,
8583 &clp->cl_session->session_state);
8584
8585 if (clp->cl_serverscope != NULL &&
8586 !nfs41_same_server_scope(clp->cl_serverscope,
8587 resp->server_scope)) {
8588 dprintk("%s: server_scope mismatch detected\n",
8589 __func__);
8590 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8591 }
8592
8593 swap(clp->cl_serverowner, resp->server_owner);
8594 swap(clp->cl_serverscope, resp->server_scope);
8595 swap(clp->cl_implid, resp->impl_id);
8596
8597 /* Save the EXCHANGE_ID verifier for session trunking tests */
8598 memcpy(clp->cl_confirm.data, argp->verifier.data,
8599 sizeof(clp->cl_confirm.data));
8600 out:
8601 trace_nfs4_exchange_id(clp, status);
8602 rpc_put_task(task);
8603 return status;
8604 }
8605
8606 /*
8607 * nfs4_proc_exchange_id()
8608 *
8609 * Returns zero, a negative errno, or a negative NFS4ERR status code.
8610 *
8611 * Since the clientid has expired, all compounds using sessions
8612 * associated with the stale clientid will be returning
8613 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8614 * be in some phase of session reset.
8615 *
8616 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8617 */
8618 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8619 {
8620 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8621 int status;
8622
8623 /* try SP4_MACH_CRED if krb5i/p */
8624 if (authflavor == RPC_AUTH_GSS_KRB5I ||
8625 authflavor == RPC_AUTH_GSS_KRB5P) {
8626 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8627 if (!status)
8628 return 0;
8629 }
8630
8631 /* try SP4_NONE */
8632 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8633 }
8634
8635 /**
8636 * nfs4_test_session_trunk - test an rpc_xprt for session trunking
8637 * @clnt: struct rpc_clnt to get the new transport
8638 * @xprt: the rpc_xprt to test
8639 * @data: call data for _nfs4_proc_exchange_id
8640 *
8641 * This is an add_xprt_test() test function called from
8642 * rpc_clnt_setup_test_and_add_xprt().
8643 *
8644 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt()
8645 * and is dereferenced in nfs4_exchange_id_release().
8646 *
8647 * Upon success, the new transport is added to the rpc_clnt.
8648 *
8649 */
8650 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8651 void *data)
8652 {
8653 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
8654 struct rpc_task *task;
8655 int status;
8656
8657 u32 sp4_how;
8658
8659 dprintk("--> %s try %s\n", __func__,
8660 xprt->address_strings[RPC_DISPLAY_ADDR]);
8661
8662 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8663
8664 /* Test connection for session trunking. Async exchange_id call */
8665 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8666 if (IS_ERR(task))
8667 return;
8668
8669 status = task->tk_status;
8670 if (status == 0)
8671 status = nfs4_detect_session_trunking(adata->clp,
8672 task->tk_msg.rpc_resp, xprt);
8673
8674 if (status == 0)
8675 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8676
8677 rpc_put_task(task);
8678 }
8679 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8680
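/*
 * DESTROY_CLIENTID is sent synchronously on the state-management
 * rpc_clnt; nfs4_proc_destroy_clientid() below retries a bounded number
 * of times when the server answers NFS4ERR_DELAY or CLIENTID_BUSY.
 */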
8681 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8682 const struct cred *cred)
8683 {
8684 struct rpc_message msg = {
8685 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8686 .rpc_argp = clp,
8687 .rpc_cred = cred,
8688 };
8689 int status;
8690
8691 status = rpc_call_sync(clp->cl_rpcclient, &msg,
8692 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8693 trace_nfs4_destroy_clientid(clp, status);
8694 if (status)
8695 dprintk("NFS: Got error %d from the server %s on "
8696 "DESTROY_CLIENTID.", status, clp->cl_hostname);
8697 return status;
8698 }
8699
8700 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
8701 const struct cred *cred)
8702 {
8703 unsigned int loop;
8704 int ret;
8705
8706 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
8707 ret = _nfs4_proc_destroy_clientid(clp, cred);
8708 switch (ret) {
8709 case -NFS4ERR_DELAY:
8710 case -NFS4ERR_CLIENTID_BUSY:
8711 ssleep(1);
8712 break;
8713 default:
8714 return ret;
8715 }
8716 }
8717 return 0;
8718 }
8719
8720 int nfs4_destroy_clientid(struct nfs_client *clp)
8721 {
8722 const struct cred *cred;
8723 int ret = 0;
8724
8725 if (clp->cl_mvops->minor_version < 1)
8726 goto out;
8727 if (clp->cl_exchange_flags == 0)
8728 goto out;
8729 if (clp->cl_preserve_clid)
8730 goto out;
8731 cred = nfs4_get_clid_cred(clp);
8732 ret = nfs4_proc_destroy_clientid(clp, cred);
8733 put_cred(cred);
8734 switch (ret) {
8735 case 0:
8736 case -NFS4ERR_STALE_CLIENTID:
8737 clp->cl_exchange_flags = 0;
8738 }
8739 out:
8740 return ret;
8741 }
8742
8743 #endif /* CONFIG_NFS_V4_1 */
8744
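/*
 * GET_LEASE_TIME is issued while the session is still being established,
 * so the prepare/done callbacks below only set up the sequence and retry
 * on NFS4ERR_DELAY/GRACE rather than triggering state recovery.
 */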
8745 struct nfs4_get_lease_time_data {
8746 struct nfs4_get_lease_time_args *args;
8747 struct nfs4_get_lease_time_res *res;
8748 struct nfs_client *clp;
8749 };
8750
8751 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
8752 void *calldata)
8753 {
8754 struct nfs4_get_lease_time_data *data =
8755 (struct nfs4_get_lease_time_data *)calldata;
8756
8757 dprintk("--> %s\n", __func__);
8758 /* Just set up the sequence; do not trigger session recovery,
8759 since we're invoked from within one. */
8760 nfs4_setup_sequence(data->clp,
8761 &data->args->la_seq_args,
8762 &data->res->lr_seq_res,
8763 task);
8764 dprintk("<-- %s\n", __func__);
8765 }
8766
8767 /*
8768 * Called from nfs4_state_manager thread for session setup, so don't recover
8769 * from sequence operation or clientid errors.
8770 */
8771 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
8772 {
8773 struct nfs4_get_lease_time_data *data =
8774 (struct nfs4_get_lease_time_data *)calldata;
8775
8776 dprintk("--> %s\n", __func__);
8777 if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
8778 return;
8779 switch (task->tk_status) {
8780 case -NFS4ERR_DELAY:
8781 case -NFS4ERR_GRACE:
8782 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
8783 rpc_delay(task, NFS4_POLL_RETRY_MIN);
8784 task->tk_status = 0;
8785 fallthrough;
8786 case -NFS4ERR_RETRY_UNCACHED_REP:
8787 rpc_restart_call_prepare(task);
8788 return;
8789 }
8790 dprintk("<-- %s\n", __func__);
8791 }
8792
8793 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
8794 .rpc_call_prepare = nfs4_get_lease_time_prepare,
8795 .rpc_call_done = nfs4_get_lease_time_done,
8796 };
8797
8798 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
8799 {
8800 struct nfs4_get_lease_time_args args;
8801 struct nfs4_get_lease_time_res res = {
8802 .lr_fsinfo = fsinfo,
8803 };
8804 struct nfs4_get_lease_time_data data = {
8805 .args = &args,
8806 .res = &res,
8807 .clp = clp,
8808 };
8809 struct rpc_message msg = {
8810 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
8811 .rpc_argp = &args,
8812 .rpc_resp = &res,
8813 };
8814 struct rpc_task_setup task_setup = {
8815 .rpc_client = clp->cl_rpcclient,
8816 .rpc_message = &msg,
8817 .callback_ops = &nfs4_get_lease_time_ops,
8818 .callback_data = &data,
8819 .flags = RPC_TASK_TIMEOUT,
8820 };
8821
8822 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
8823 return nfs4_call_sync_custom(&task_setup);
8824 }
8825
8826 #ifdef CONFIG_NFS_V4_1
8827
8828 /*
8829 * Initialize the values to be used by the client in CREATE_SESSION.
8830 * If nfs4_init_session has set the fore channel request and response
8831 * sizes, use them.
8832 *
8833 * Set the back channel max_resp_sz_cached to zero to force the client to
8834 * always set csa_cachethis to FALSE because the current implementation
8835 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
8836 */
8837 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
8838 struct rpc_clnt *clnt)
8839 {
8840 unsigned int max_rqst_sz, max_resp_sz;
8841 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
8842 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
8843
8844 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
8845 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
8846
8847 /* Fore channel attributes */
8848 args->fc_attrs.max_rqst_sz = max_rqst_sz;
8849 args->fc_attrs.max_resp_sz = max_resp_sz;
8850 args->fc_attrs.max_ops = NFS4_MAX_OPS;
8851 args->fc_attrs.max_reqs = max_session_slots;
8852
8853 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
8854 "max_ops=%u max_reqs=%u\n",
8855 __func__,
8856 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
8857 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
8858
8859 /* Back channel attributes */
8860 args->bc_attrs.max_rqst_sz = max_bc_payload;
8861 args->bc_attrs.max_resp_sz = max_bc_payload;
8862 args->bc_attrs.max_resp_sz_cached = 0;
8863 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
8864 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
8865 if (args->bc_attrs.max_reqs > max_bc_slots)
8866 args->bc_attrs.max_reqs = max_bc_slots;
8867
8868 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
8869 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
8870 __func__,
8871 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
8872 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
8873 args->bc_attrs.max_reqs);
8874 }
8875
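/*
 * Validate the fore channel attributes returned by CREATE_SESSION: the
 * server must not exceed our max_resp_sz, must grant at least the
 * requested max_ops, and must offer at least one slot.
 */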
8876 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
8877 struct nfs41_create_session_res *res)
8878 {
8879 struct nfs4_channel_attrs *sent = &args->fc_attrs;
8880 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
8881
8882 if (rcvd->max_resp_sz > sent->max_resp_sz)
8883 return -EINVAL;
8884 /*
8885 * Our requested max_ops is the minimum we need; we're not
8886 * prepared to break up compounds into smaller pieces than that.
8887 * So, no point even trying to continue if the server won't
8888 * cooperate:
8889 */
8890 if (rcvd->max_ops < sent->max_ops)
8891 return -EINVAL;
8892 if (rcvd->max_reqs == 0)
8893 return -EINVAL;
8894 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
8895 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
8896 return 0;
8897 }
8898
8899 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
8900 struct nfs41_create_session_res *res)
8901 {
8902 struct nfs4_channel_attrs *sent = &args->bc_attrs;
8903 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
8904
8905 if (!(res->flags & SESSION4_BACK_CHAN))
8906 goto out;
8907 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
8908 return -EINVAL;
8909 if (rcvd->max_resp_sz < sent->max_resp_sz)
8910 return -EINVAL;
8911 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
8912 return -EINVAL;
8913 if (rcvd->max_ops > sent->max_ops)
8914 return -EINVAL;
8915 if (rcvd->max_reqs > sent->max_reqs)
8916 return -EINVAL;
8917 out:
8918 return 0;
8919 }
8920
8921 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
8922 struct nfs41_create_session_res *res)
8923 {
8924 int ret;
8925
8926 ret = nfs4_verify_fore_channel_attrs(args, res);
8927 if (ret)
8928 return ret;
8929 return nfs4_verify_back_channel_attrs(args, res);
8930 }
8931
8932 static void nfs4_update_session(struct nfs4_session *session,
8933 struct nfs41_create_session_res *res)
8934 {
8935 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
8936 /* Mark client id and session as being confirmed */
8937 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
8938 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
8939 session->flags = res->flags;
8940 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
8941 if (res->flags & SESSION4_BACK_CHAN)
8942 memcpy(&session->bc_attrs, &res->bc_attrs,
8943 sizeof(session->bc_attrs));
8944 }
8945
8946 static int _nfs4_proc_create_session(struct nfs_client *clp,
8947 const struct cred *cred)
8948 {
8949 struct nfs4_session *session = clp->cl_session;
8950 struct nfs41_create_session_args args = {
8951 .client = clp,
8952 .clientid = clp->cl_clientid,
8953 .seqid = clp->cl_seqid,
8954 .cb_program = NFS4_CALLBACK,
8955 };
8956 struct nfs41_create_session_res res;
8957
8958 struct rpc_message msg = {
8959 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
8960 .rpc_argp = &args,
8961 .rpc_resp = &res,
8962 .rpc_cred = cred,
8963 };
8964 int status;
8965
8966 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
8967 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
8968
8969 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
8970 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8971 trace_nfs4_create_session(clp, status);
8972
8973 switch (status) {
8974 case -NFS4ERR_STALE_CLIENTID:
8975 case -NFS4ERR_DELAY:
8976 case -ETIMEDOUT:
8977 case -EACCES:
8978 case -EAGAIN:
8979 goto out;
8980 }
8981
8982 clp->cl_seqid++;
8983 if (!status) {
8984 /* Verify the session's negotiated channel_attrs values */
8985 status = nfs4_verify_channel_attrs(&args, &res);
8986 /* Increment the clientid slot sequence id */
8987 if (status)
8988 goto out;
8989 nfs4_update_session(session, &res);
8990 }
8991 out:
8992 return status;
8993 }
8994
8995 /*
8996 * Issues a CREATE_SESSION operation to the server.
8997 * It is the responsibility of the caller to verify the session is
8998 * expired before calling this routine.
8999 */
9000 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9001 {
9002 int status;
9003 unsigned int *ptr;
9004 struct nfs4_session *session = clp->cl_session;
9005
9006 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9007
9008 status = _nfs4_proc_create_session(clp, cred);
9009 if (status)
9010 goto out;
9011
9012 /* Init or reset the session slot tables */
9013 status = nfs4_setup_session_slot_tables(session);
9014 dprintk("slot table setup returned %d\n", status);
9015 if (status)
9016 goto out;
9017
9018 ptr = (unsigned int *)&session->sess_id.data[0];
9019 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9020 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9021 out:
9022 dprintk("<-- %s\n", __func__);
9023 return status;
9024 }
9025
9026 /*
9027 * Issue the over-the-wire RPC DESTROY_SESSION.
9028 * The caller must serialize access to this routine.
9029 */
9030 int nfs4_proc_destroy_session(struct nfs4_session *session,
9031 const struct cred *cred)
9032 {
9033 struct rpc_message msg = {
9034 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9035 .rpc_argp = session,
9036 .rpc_cred = cred,
9037 };
9038 int status = 0;
9039
9040 dprintk("--> nfs4_proc_destroy_session\n");
9041
9042 /* session is still being set up */
9043 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9044 return 0;
9045
9046 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9047 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9048 trace_nfs4_destroy_session(session->clp, status);
9049
9050 if (status)
9051 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9052 "Session has been destroyed regardless...\n", status);
9053
9054 dprintk("<-- nfs4_proc_destroy_session\n");
9055 return status;
9056 }
9057
9058 /*
9059 * Renew the cl_session lease.
9060 */
9061 struct nfs4_sequence_data {
9062 struct nfs_client *clp;
9063 struct nfs4_sequence_args args;
9064 struct nfs4_sequence_res res;
9065 };
9066
9067 static void nfs41_sequence_release(void *data)
9068 {
9069 struct nfs4_sequence_data *calldata = data;
9070 struct nfs_client *clp = calldata->clp;
9071
9072 if (refcount_read(&clp->cl_count) > 1)
9073 nfs4_schedule_state_renewal(clp);
9074 nfs_put_client(clp);
9075 kfree(calldata);
9076 }
9077
9078 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9079 {
9080 switch (task->tk_status) {
9081 case -NFS4ERR_DELAY:
9082 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9083 return -EAGAIN;
9084 default:
9085 nfs4_schedule_lease_recovery(clp);
9086 }
9087 return 0;
9088 }
9089
9090 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9091 {
9092 struct nfs4_sequence_data *calldata = data;
9093 struct nfs_client *clp = calldata->clp;
9094
9095 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9096 return;
9097
9098 trace_nfs4_sequence(clp, task->tk_status);
9099 if (task->tk_status < 0) {
9100 dprintk("%s ERROR %d\n", __func__, task->tk_status);
9101 if (refcount_read(&clp->cl_count) == 1)
9102 goto out;
9103
9104 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9105 rpc_restart_call_prepare(task);
9106 return;
9107 }
9108 }
9109 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9110 out:
9111 dprintk("<-- %s\n", __func__);
9112 }
9113
9114 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9115 {
9116 struct nfs4_sequence_data *calldata = data;
9117 struct nfs_client *clp = calldata->clp;
9118 struct nfs4_sequence_args *args;
9119 struct nfs4_sequence_res *res;
9120
9121 args = task->tk_msg.rpc_argp;
9122 res = task->tk_msg.rpc_resp;
9123
9124 nfs4_setup_sequence(clp, args, res, task);
9125 }
9126
9127 static const struct rpc_call_ops nfs41_sequence_ops = {
9128 .rpc_call_done = nfs41_sequence_call_done,
9129 .rpc_call_prepare = nfs41_sequence_prepare,
9130 .rpc_release = nfs41_sequence_release,
9131 };
9132
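/*
 * Build and launch an asynchronous SEQUENCE RPC used to renew the lease.
 * A pre-acquired slot may be passed in; it is released on failure so it
 * is never leaked.
 */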
9133 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9134 const struct cred *cred,
9135 struct nfs4_slot *slot,
9136 bool is_privileged)
9137 {
9138 struct nfs4_sequence_data *calldata;
9139 struct rpc_message msg = {
9140 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9141 .rpc_cred = cred,
9142 };
9143 struct rpc_task_setup task_setup_data = {
9144 .rpc_client = clp->cl_rpcclient,
9145 .rpc_message = &msg,
9146 .callback_ops = &nfs41_sequence_ops,
9147 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
9148 };
9149 struct rpc_task *ret;
9150
9151 ret = ERR_PTR(-EIO);
9152 if (!refcount_inc_not_zero(&clp->cl_count))
9153 goto out_err;
9154
9155 ret = ERR_PTR(-ENOMEM);
9156 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9157 if (calldata == NULL)
9158 goto out_put_clp;
9159 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9160 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9161 msg.rpc_argp = &calldata->args;
9162 msg.rpc_resp = &calldata->res;
9163 calldata->clp = clp;
9164 task_setup_data.callback_data = calldata;
9165
9166 ret = rpc_run_task(&task_setup_data);
9167 if (IS_ERR(ret))
9168 goto out_err;
9169 return ret;
9170 out_put_clp:
9171 nfs_put_client(clp);
9172 out_err:
9173 nfs41_release_slot(slot);
9174 return ret;
9175 }
9176
9177 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9178 {
9179 struct rpc_task *task;
9180 int ret = 0;
9181
9182 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9183 return -EAGAIN;
9184 task = _nfs41_proc_sequence(clp, cred, NULL, false);
9185 if (IS_ERR(task))
9186 ret = PTR_ERR(task);
9187 else
9188 rpc_put_task_async(task);
9189 dprintk("<-- %s status=%d\n", __func__, ret);
9190 return ret;
9191 }
9192
9193 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9194 {
9195 struct rpc_task *task;
9196 int ret;
9197
9198 task = _nfs41_proc_sequence(clp, cred, NULL, true);
9199 if (IS_ERR(task)) {
9200 ret = PTR_ERR(task);
9201 goto out;
9202 }
9203 ret = rpc_wait_for_completion_task(task);
9204 if (!ret)
9205 ret = task->tk_status;
9206 rpc_put_task(task);
9207 out:
9208 dprintk("<-- %s status=%d\n", __func__, ret);
9209 return ret;
9210 }
9211
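/*
 * RECLAIM_COMPLETE is sent once state reclaim after a server reboot has
 * finished; on success, waiters on cl_lock_waitq are woken (see
 * nfs41_reclaim_complete_handle_errors()).
 */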
9212 struct nfs4_reclaim_complete_data {
9213 struct nfs_client *clp;
9214 struct nfs41_reclaim_complete_args arg;
9215 struct nfs41_reclaim_complete_res res;
9216 };
9217
9218 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9219 {
9220 struct nfs4_reclaim_complete_data *calldata = data;
9221
9222 nfs4_setup_sequence(calldata->clp,
9223 &calldata->arg.seq_args,
9224 &calldata->res.seq_res,
9225 task);
9226 }
9227
9228 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9229 {
9230 switch (task->tk_status) {
9231 case 0:
9232 wake_up_all(&clp->cl_lock_waitq);
9233 fallthrough;
9234 case -NFS4ERR_COMPLETE_ALREADY:
9235 case -NFS4ERR_WRONG_CRED: /* What to do here? */
9236 break;
9237 case -NFS4ERR_DELAY:
9238 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9239 fallthrough;
9240 case -NFS4ERR_RETRY_UNCACHED_REP:
9241 return -EAGAIN;
9242 case -NFS4ERR_BADSESSION:
9243 case -NFS4ERR_DEADSESSION:
9244 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9245 break;
9246 default:
9247 nfs4_schedule_lease_recovery(clp);
9248 }
9249 return 0;
9250 }
9251
9252 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9253 {
9254 struct nfs4_reclaim_complete_data *calldata = data;
9255 struct nfs_client *clp = calldata->clp;
9256 struct nfs4_sequence_res *res = &calldata->res.seq_res;
9257
9258 dprintk("--> %s\n", __func__);
9259 if (!nfs41_sequence_done(task, res))
9260 return;
9261
9262 trace_nfs4_reclaim_complete(clp, task->tk_status);
9263 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9264 rpc_restart_call_prepare(task);
9265 return;
9266 }
9267 dprintk("<-- %s\n", __func__);
9268 }
9269
9270 static void nfs4_free_reclaim_complete_data(void *data)
9271 {
9272 struct nfs4_reclaim_complete_data *calldata = data;
9273
9274 kfree(calldata);
9275 }
9276
9277 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9278 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9279 .rpc_call_done = nfs4_reclaim_complete_done,
9280 .rpc_release = nfs4_free_reclaim_complete_data,
9281 };
9282
9283 /*
9284 * Issue a global reclaim complete.
9285 */
9286 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9287 const struct cred *cred)
9288 {
9289 struct nfs4_reclaim_complete_data *calldata;
9290 struct rpc_message msg = {
9291 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9292 .rpc_cred = cred,
9293 };
9294 struct rpc_task_setup task_setup_data = {
9295 .rpc_client = clp->cl_rpcclient,
9296 .rpc_message = &msg,
9297 .callback_ops = &nfs4_reclaim_complete_call_ops,
9298 .flags = RPC_TASK_NO_ROUND_ROBIN,
9299 };
9300 int status = -ENOMEM;
9301
9302 dprintk("--> %s\n", __func__);
9303 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9304 if (calldata == NULL)
9305 goto out;
9306 calldata->clp = clp;
9307 calldata->arg.one_fs = 0;
9308
9309 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9310 msg.rpc_argp = &calldata->arg;
9311 msg.rpc_resp = &calldata->res;
9312 task_setup_data.callback_data = calldata;
9313 status = nfs4_call_sync_custom(&task_setup_data);
9314 out:
9315 dprintk("<-- %s status=%d\n", __func__, status);
9316 return status;
9317 }
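/*
 * Illustrative sketch, not part of the original source: reboot recovery
 * reaches nfs41_proc_reclaim_complete() via the reclaim_complete hook in
 * nfs41_reboot_recovery_ops (see the recovery ops tables near the end of
 * this file).  A minimal caller, assuming a valid @clp and the lease
 * owner's credential, might look like this hypothetical helper:
 */
static void __maybe_unused nfs41_example_reclaim_complete(struct nfs_client *clp,
							   const struct cred *cred)
{
	int status;

	/* Tell the server we are done reclaiming state after its reboot. */
	status = nfs41_proc_reclaim_complete(clp, cred);
	if (status != 0)
		dprintk("%s: RECLAIM_COMPLETE returned %d\n", __func__, status);
}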
9318
9319 static void
9320 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9321 {
9322 struct nfs4_layoutget *lgp = calldata;
9323 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9324
9325 dprintk("--> %s\n", __func__);
9326 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9327 &lgp->res.seq_res, task);
9328 dprintk("<-- %s\n", __func__);
9329 }
9330
9331 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9332 {
9333 struct nfs4_layoutget *lgp = calldata;
9334
9335 dprintk("--> %s\n", __func__);
9336 nfs41_sequence_process(task, &lgp->res.seq_res);
9337 dprintk("<-- %s\n", __func__);
9338 }
9339
9340 static int
9341 nfs4_layoutget_handle_exception(struct rpc_task *task,
9342 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9343 {
9344 struct inode *inode = lgp->args.inode;
9345 struct nfs_server *server = NFS_SERVER(inode);
9346 struct pnfs_layout_hdr *lo;
9347 int nfs4err = task->tk_status;
9348 int err, status = 0;
9349 LIST_HEAD(head);
9350
9351 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9352
9353 nfs4_sequence_free_slot(&lgp->res.seq_res);
9354
9355 switch (nfs4err) {
9356 case 0:
9357 goto out;
9358
9359 /*
9360 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9361 * on the file. Set tk_status to -ENODATA to tell the upper layer to
9362 * retry using regular (in-band) I/O through the MDS.
9363 */
9364 case -NFS4ERR_LAYOUTUNAVAILABLE:
9365 status = -ENODATA;
9366 goto out;
9367 /*
9368 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
9369 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
9370 */
9371 case -NFS4ERR_BADLAYOUT:
9372 status = -EOVERFLOW;
9373 goto out;
9374 /*
9375 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9376 * (or clients) writing to the same RAID stripe except when
9377 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9378 *
9379 * Treat it like we would RECALLCONFLICT -- we retry for a little
9380 * while, and then eventually give up.
9381 */
9382 case -NFS4ERR_LAYOUTTRYLATER:
9383 if (lgp->args.minlength == 0) {
9384 status = -EOVERFLOW;
9385 goto out;
9386 }
9387 status = -EBUSY;
9388 break;
9389 case -NFS4ERR_RECALLCONFLICT:
9390 status = -ERECALLCONFLICT;
9391 break;
9392 case -NFS4ERR_DELEG_REVOKED:
9393 case -NFS4ERR_ADMIN_REVOKED:
9394 case -NFS4ERR_EXPIRED:
9395 case -NFS4ERR_BAD_STATEID:
9396 exception->timeout = 0;
9397 spin_lock(&inode->i_lock);
9398 lo = NFS_I(inode)->layout;
9399 /* If the open stateid was bad, then recover it. */
9400 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9401 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9402 spin_unlock(&inode->i_lock);
9403 exception->state = lgp->args.ctx->state;
9404 exception->stateid = &lgp->args.stateid;
9405 break;
9406 }
9407
9408 /*
9409 * Mark the bad layout state as invalid, then retry
9410 */
9411 pnfs_mark_layout_stateid_invalid(lo, &head);
9412 spin_unlock(&inode->i_lock);
9413 nfs_commit_inode(inode, 0);
9414 pnfs_free_lseg_list(&head);
9415 status = -EAGAIN;
9416 goto out;
9417 }
9418
9419 err = nfs4_handle_exception(server, nfs4err, exception);
9420 if (!status) {
9421 if (exception->retry)
9422 status = -EAGAIN;
9423 else
9424 status = err;
9425 }
9426 out:
9427 dprintk("<-- %s\n", __func__);
9428 return status;
9429 }
9430
9431 size_t max_response_pages(struct nfs_server *server)
9432 {
9433 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9434 return nfs_page_array_len(0, max_resp_sz);
9435 }
9436
9437 static void nfs4_layoutget_release(void *calldata)
9438 {
9439 struct nfs4_layoutget *lgp = calldata;
9440
9441 dprintk("--> %s\n", __func__);
9442 nfs4_sequence_free_slot(&lgp->res.seq_res);
9443 pnfs_layoutget_free(lgp);
9444 dprintk("<-- %s\n", __func__);
9445 }
9446
9447 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9448 .rpc_call_prepare = nfs4_layoutget_prepare,
9449 .rpc_call_done = nfs4_layoutget_done,
9450 .rpc_release = nfs4_layoutget_release,
9451 };
9452
9453 struct pnfs_layout_segment *
9454 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
9455 {
9456 struct inode *inode = lgp->args.inode;
9457 struct nfs_server *server = NFS_SERVER(inode);
9458 struct rpc_task *task;
9459 struct rpc_message msg = {
9460 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9461 .rpc_argp = &lgp->args,
9462 .rpc_resp = &lgp->res,
9463 .rpc_cred = lgp->cred,
9464 };
9465 struct rpc_task_setup task_setup_data = {
9466 .rpc_client = server->client,
9467 .rpc_message = &msg,
9468 .callback_ops = &nfs4_layoutget_call_ops,
9469 .callback_data = lgp,
9470 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
9471 };
9472 struct pnfs_layout_segment *lseg = NULL;
9473 struct nfs4_exception exception = {
9474 .inode = inode,
9475 .timeout = *timeout,
9476 };
9477 int status = 0;
9478
9479 dprintk("--> %s\n", __func__);
9480
9481 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
9482 pnfs_get_layout_hdr(NFS_I(inode)->layout);
9483
9484 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9485
9486 task = rpc_run_task(&task_setup_data);
/* On failure, rpc_run_task() has already invoked nfs4_layoutget_release() */
if (IS_ERR(task))
return ERR_CAST(task);
9487
9488 status = rpc_wait_for_completion_task(task);
9489 if (status != 0)
9490 goto out;
9491
9492 if (task->tk_status < 0) {
9493 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
9494 *timeout = exception.timeout;
9495 } else if (lgp->res.layoutp->len == 0) {
9496 status = -EAGAIN;
9497 *timeout = nfs4_update_delay(&exception.timeout);
9498 } else
9499 lseg = pnfs_layout_process(lgp);
9500 out:
9501 trace_nfs4_layoutget(lgp->args.ctx,
9502 &lgp->args.range,
9503 &lgp->res.range,
9504 &lgp->res.stateid,
9505 status);
9506
9507 rpc_put_task(task);
9508 dprintk("<-- %s status=%d\n", __func__, status);
9509 if (status)
9510 return ERR_PTR(status);
9511 return lseg;
9512 }
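/*
 * Illustrative sketch, not part of the original source: a pNFS caller is
 * expected to treat the ERR_PTR() results of nfs4_proc_layoutget() roughly
 * as follows.  Note that the rpc_release callback has already freed the
 * nfs4_layoutget argument by this point, so "retry" means rebuilding a
 * fresh LAYOUTGET request; this hypothetical helper only classifies the
 * result.
 */
static bool __maybe_unused
nfs4_example_layoutget_should_retry(struct pnfs_layout_segment *lseg)
{
	if (!IS_ERR(lseg))
		return false;		/* got a usable layout segment */
	switch (PTR_ERR(lseg)) {
	case -EAGAIN:			/* transient; resend a new LAYOUTGET */
	case -EBUSY:			/* LAYOUTTRYLATER; retry for a while */
	case -ERECALLCONFLICT:		/* conflicting recall; retry for a while */
		return true;
	default:
		return false;		/* e.g. -ENODATA: fall back to MDS I/O */
	}
}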
9513
9514 static void
9515 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9516 {
9517 struct nfs4_layoutreturn *lrp = calldata;
9518
9519 dprintk("--> %s\n", __func__);
9520 nfs4_setup_sequence(lrp->clp,
9521 &lrp->args.seq_args,
9522 &lrp->res.seq_res,
9523 task);
9524 if (!pnfs_layout_is_valid(lrp->args.layout))
9525 rpc_exit(task, 0);
9526 }
9527
9528 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9529 {
9530 struct nfs4_layoutreturn *lrp = calldata;
9531 struct nfs_server *server;
9532
9533 dprintk("--> %s\n", __func__);
9534
9535 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9536 return;
9537
9538 /*
9539 * If there was an RPC-level error, assume the call succeeded,
9540 * and that we need to release the layout.
9541 */
9542 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9543 lrp->res.lrs_present = 0;
9544 return;
9545 }
9546
9547 server = NFS_SERVER(lrp->args.inode);
9548 switch (task->tk_status) {
9549 case -NFS4ERR_OLD_STATEID:
9550 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
9551 &lrp->args.range,
9552 lrp->args.inode))
9553 goto out_restart;
9554 fallthrough;
9555 default:
9556 task->tk_status = 0;
9557 fallthrough;
9558 case 0:
9559 break;
9560 case -NFS4ERR_DELAY:
9561 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9562 break;
9563 goto out_restart;
9564 }
9565 dprintk("<-- %s\n", __func__);
9566 return;
9567 out_restart:
9568 task->tk_status = 0;
9569 nfs4_sequence_free_slot(&lrp->res.seq_res);
9570 rpc_restart_call_prepare(task);
9571 }
9572
9573 static void nfs4_layoutreturn_release(void *calldata)
9574 {
9575 struct nfs4_layoutreturn *lrp = calldata;
9576 struct pnfs_layout_hdr *lo = lrp->args.layout;
9577
9578 dprintk("--> %s\n", __func__);
9579 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9580 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9581 nfs4_sequence_free_slot(&lrp->res.seq_res);
9582 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9583 lrp->ld_private.ops->free(&lrp->ld_private);
9584 pnfs_put_layout_hdr(lrp->args.layout);
9585 nfs_iput_and_deactive(lrp->inode);
9586 put_cred(lrp->cred);
9587 kfree(calldata);
9588 dprintk("<-- %s\n", __func__);
9589 }
9590
9591 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9592 .rpc_call_prepare = nfs4_layoutreturn_prepare,
9593 .rpc_call_done = nfs4_layoutreturn_done,
9594 .rpc_release = nfs4_layoutreturn_release,
9595 };
9596
9597 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9598 {
9599 struct rpc_task *task;
9600 struct rpc_message msg = {
9601 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9602 .rpc_argp = &lrp->args,
9603 .rpc_resp = &lrp->res,
9604 .rpc_cred = lrp->cred,
9605 };
9606 struct rpc_task_setup task_setup_data = {
9607 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
9608 .rpc_message = &msg,
9609 .callback_ops = &nfs4_layoutreturn_call_ops,
9610 .callback_data = lrp,
9611 };
9612 int status = 0;
9613
9614 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9615 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9616 &task_setup_data.rpc_client, &msg);
9617
9618 dprintk("--> %s\n", __func__);
9619 if (!sync) {
9620 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
9621 if (!lrp->inode) {
9622 nfs4_layoutreturn_release(lrp);
9623 return -EAGAIN;
9624 }
9625 task_setup_data.flags |= RPC_TASK_ASYNC;
9626 }
9627 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 0);
9628 task = rpc_run_task(&task_setup_data);
9629 if (IS_ERR(task))
9630 return PTR_ERR(task);
9631 if (sync)
9632 status = task->tk_status;
9633 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9634 dprintk("<-- %s status=%d\n", __func__, status);
9635 rpc_put_task(task);
9636 return status;
9637 }
9638
9639 static int
9640 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
9641 struct pnfs_device *pdev,
9642 const struct cred *cred)
9643 {
9644 struct nfs4_getdeviceinfo_args args = {
9645 .pdev = pdev,
9646 .notify_types = NOTIFY_DEVICEID4_CHANGE |
9647 NOTIFY_DEVICEID4_DELETE,
9648 };
9649 struct nfs4_getdeviceinfo_res res = {
9650 .pdev = pdev,
9651 };
9652 struct rpc_message msg = {
9653 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9654 .rpc_argp = &args,
9655 .rpc_resp = &res,
9656 .rpc_cred = cred,
9657 };
9658 int status;
9659
9660 dprintk("--> %s\n", __func__);
9661 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9662 if (res.notification & ~args.notify_types)
9663 dprintk("%s: unsupported notification\n", __func__);
9664 if (res.notification != args.notify_types)
9665 pdev->nocache = 1;
9666
9667 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
9668
9669 dprintk("<-- %s status=%d\n", __func__, status);
9670
9671 return status;
9672 }
9673
9674 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9675 struct pnfs_device *pdev,
9676 const struct cred *cred)
9677 {
9678 struct nfs4_exception exception = { };
9679 int err;
9680
9681 do {
9682 err = nfs4_handle_exception(server,
9683 _nfs4_proc_getdeviceinfo(server, pdev, cred),
9684 &exception);
9685 } while (exception.retry);
9686 return err;
9687 }
9688 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
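/*
 * Illustrative sketch, not part of the original source: a layout driver
 * that has already set up a struct pnfs_device (device ID, receive pages,
 * and so on) could call the exported wrapper roughly as follows; the
 * helper name is hypothetical.  pdev->nocache is set above whenever the
 * server did not agree to all of the requested notifications.
 */
static int __maybe_unused nfs4_example_getdeviceinfo(struct nfs_server *server,
						     struct pnfs_device *pdev,
						     const struct cred *cred)
{
	int err = nfs4_proc_getdeviceinfo(server, pdev, cred);

	if (err == 0 && pdev->nocache)
		dprintk("%s: device info must not be cached\n", __func__);
	return err;
}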
9689
9690 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9691 {
9692 struct nfs4_layoutcommit_data *data = calldata;
9693 struct nfs_server *server = NFS_SERVER(data->args.inode);
9694
9695 nfs4_setup_sequence(server->nfs_client,
9696 &data->args.seq_args,
9697 &data->res.seq_res,
9698 task);
9699 }
9700
9701 static void
9702 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9703 {
9704 struct nfs4_layoutcommit_data *data = calldata;
9705 struct nfs_server *server = NFS_SERVER(data->args.inode);
9706
9707 if (!nfs41_sequence_done(task, &data->res.seq_res))
9708 return;
9709
9710 switch (task->tk_status) { /* Just ignore these failures */
9711 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9712 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
9713 case -NFS4ERR_BADLAYOUT: /* no layout */
9714 case -NFS4ERR_GRACE: /* loca_reclaim always false */
9715 task->tk_status = 0;
fallthrough;
9716 case 0:
9717 break;
9718 default:
9719 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9720 rpc_restart_call_prepare(task);
9721 return;
9722 }
9723 }
9724 }
9725
9726 static void nfs4_layoutcommit_release(void *calldata)
9727 {
9728 struct nfs4_layoutcommit_data *data = calldata;
9729
9730 pnfs_cleanup_layoutcommit(data);
9731 nfs_post_op_update_inode_force_wcc(data->args.inode,
9732 data->res.fattr);
9733 put_cred(data->cred);
9734 nfs_iput_and_deactive(data->inode);
9735 kfree(data);
9736 }
9737
9738 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9739 .rpc_call_prepare = nfs4_layoutcommit_prepare,
9740 .rpc_call_done = nfs4_layoutcommit_done,
9741 .rpc_release = nfs4_layoutcommit_release,
9742 };
9743
9744 int
9745 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9746 {
9747 struct rpc_message msg = {
9748 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9749 .rpc_argp = &data->args,
9750 .rpc_resp = &data->res,
9751 .rpc_cred = data->cred,
9752 };
9753 struct rpc_task_setup task_setup_data = {
9754 .task = &data->task,
9755 .rpc_client = NFS_CLIENT(data->args.inode),
9756 .rpc_message = &msg,
9757 .callback_ops = &nfs4_layoutcommit_ops,
9758 .callback_data = data,
9759 };
9760 struct rpc_task *task;
9761 int status = 0;
9762
9763 dprintk("NFS: initiating layoutcommit call. sync %d "
9764 "lbw: %llu inode %lu\n", sync,
9765 data->args.lastbytewritten,
9766 data->args.inode->i_ino);
9767
9768 if (!sync) {
9769 data->inode = nfs_igrab_and_active(data->args.inode);
9770 if (data->inode == NULL) {
9771 nfs4_layoutcommit_release(data);
9772 return -EAGAIN;
9773 }
9774 task_setup_data.flags = RPC_TASK_ASYNC;
9775 }
9776 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
9777 task = rpc_run_task(&task_setup_data);
9778 if (IS_ERR(task))
9779 return PTR_ERR(task);
9780 if (sync)
9781 status = task->tk_status;
9782 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9783 dprintk("%s: status %d\n", __func__, status);
9784 rpc_put_task(task);
9785 return status;
9786 }
9787
9788 /*
9789 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
9790 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
9791 */
9792 static int
9793 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9794 struct nfs_fsinfo *info,
9795 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
9796 {
9797 struct nfs41_secinfo_no_name_args args = {
9798 .style = SECINFO_STYLE_CURRENT_FH,
9799 };
9800 struct nfs4_secinfo_res res = {
9801 .flavors = flavors,
9802 };
9803 struct rpc_message msg = {
9804 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
9805 .rpc_argp = &args,
9806 .rpc_resp = &res,
9807 };
9808 struct nfs4_call_sync_data data = {
9809 .seq_server = server,
9810 .seq_args = &args.seq_args,
9811 .seq_res = &res.seq_res,
9812 };
9813 struct rpc_task_setup task_setup = {
9814 .rpc_client = server->client,
9815 .rpc_message = &msg,
9816 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
9817 .callback_data = &data,
9818 .flags = RPC_TASK_NO_ROUND_ROBIN,
9819 };
9820 const struct cred *cred = NULL;
9821 int status;
9822
9823 if (use_integrity) {
9824 task_setup.rpc_client = server->nfs_client->cl_rpcclient;
9825
9826 cred = nfs4_get_clid_cred(server->nfs_client);
9827 msg.rpc_cred = cred;
9828 }
9829
9830 dprintk("--> %s\n", __func__);
9831 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
9832 status = nfs4_call_sync_custom(&task_setup);
9833 dprintk("<-- %s status=%d\n", __func__, status);
9834
9835 put_cred(cred);
9836
9837 return status;
9838 }
9839
9840 static int
9841 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9842 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
9843 {
9844 struct nfs4_exception exception = {
9845 .interruptible = true,
9846 };
9847 int err;
9848 do {
9849 /* first try using integrity protection */
9850 err = -NFS4ERR_WRONGSEC;
9851
9852 /* try to use integrity protection with machine cred */
9853 if (_nfs4_is_integrity_protected(server->nfs_client))
9854 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9855 flavors, true);
9856
9857 /*
9858 * If unable to use integrity protection, or if SECINFO with
9859 * integrity protection returns NFS4ERR_WRONGSEC (which is
9860 * disallowed by the spec but exists in deployed servers),
9861 * use the current filesystem's rpc_client and the user cred.
9862 */
9863 if (err == -NFS4ERR_WRONGSEC)
9864 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9865 flavors, false);
9866
9867 switch (err) {
9868 case 0:
9869 case -NFS4ERR_WRONGSEC:
9870 case -ENOTSUPP:
9871 goto out;
9872 default:
9873 err = nfs4_handle_exception(server, err, &exception);
9874 }
9875 } while (exception.retry);
9876 out:
9877 return err;
9878 }
9879
9880 static int
9881 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
9882 struct nfs_fsinfo *info)
9883 {
9884 int err;
9885 struct page *page;
9886 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
9887 struct nfs4_secinfo_flavors *flavors;
9888 struct nfs4_secinfo4 *secinfo;
9889 int i;
9890
9891 page = alloc_page(GFP_KERNEL);
9892 if (!page) {
9893 err = -ENOMEM;
9894 goto out;
9895 }
9896
9897 flavors = page_address(page);
9898 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
9899
9900 /*
9901 * Fall back on the "guess and check" method if
9902 * the server doesn't support SECINFO_NO_NAME.
9903 */
9904 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
9905 err = nfs4_find_root_sec(server, fhandle, info);
9906 goto out_freepage;
9907 }
9908 if (err)
9909 goto out_freepage;
9910
9911 for (i = 0; i < flavors->num_flavors; i++) {
9912 secinfo = &flavors->flavors[i];
9913
9914 switch (secinfo->flavor) {
9915 case RPC_AUTH_NULL:
9916 case RPC_AUTH_UNIX:
9917 case RPC_AUTH_GSS:
9918 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
9919 &secinfo->flavor_info);
9920 break;
9921 default:
9922 flavor = RPC_AUTH_MAXFLAVOR;
9923 break;
9924 }
9925
9926 if (!nfs_auth_info_match(&server->auth_info, flavor))
9927 flavor = RPC_AUTH_MAXFLAVOR;
9928
9929 if (flavor != RPC_AUTH_MAXFLAVOR) {
9930 err = nfs4_lookup_root_sec(server, fhandle,
9931 info, flavor);
9932 if (!err)
9933 break;
9934 }
9935 }
9936
9937 if (flavor == RPC_AUTH_MAXFLAVOR)
9938 err = -EPERM;
9939
9940 out_freepage:
9941 put_page(page);
9942 if (err == -EACCES)
9943 return -EPERM;
9944 out:
9945 return err;
9946 }
9947
9948 static int _nfs41_test_stateid(struct nfs_server *server,
9949 nfs4_stateid *stateid,
9950 const struct cred *cred)
9951 {
9952 int status;
9953 struct nfs41_test_stateid_args args = {
9954 .stateid = stateid,
9955 };
9956 struct nfs41_test_stateid_res res;
9957 struct rpc_message msg = {
9958 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
9959 .rpc_argp = &args,
9960 .rpc_resp = &res,
9961 .rpc_cred = cred,
9962 };
9963 struct rpc_clnt *rpc_client = server->client;
9964
9965 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9966 &rpc_client, &msg);
9967
9968 dprintk("NFS call test_stateid %p\n", stateid);
9969 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
9970 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
9971 &args.seq_args, &res.seq_res);
9972 if (status != NFS_OK) {
9973 dprintk("NFS reply test_stateid: failed, %d\n", status);
9974 return status;
9975 }
9976 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
9977 return -res.status;
9978 }
9979
9980 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
9981 int err, struct nfs4_exception *exception)
9982 {
9983 exception->retry = 0;
9984 switch (err) {
9985 case -NFS4ERR_DELAY:
9986 case -NFS4ERR_RETRY_UNCACHED_REP:
9987 nfs4_handle_exception(server, err, exception);
9988 break;
9989 case -NFS4ERR_BADSESSION:
9990 case -NFS4ERR_BADSLOT:
9991 case -NFS4ERR_BAD_HIGH_SLOT:
9992 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9993 case -NFS4ERR_DEADSESSION:
9994 nfs4_do_handle_exception(server, err, exception);
9995 }
9996 }
9997
9998 /**
9999 * nfs41_test_stateid - perform a TEST_STATEID operation
10000 *
10001 * @server: server / transport on which to perform the operation
10002 * @stateid: state ID to test
10003 * @cred: credential
10004 *
10005 * Returns NFS_OK if the server recognizes that "stateid" is valid.
10006 * Otherwise a negative NFS4ERR value is returned if the operation
10007 * failed or the state ID is not currently valid.
10008 */
10009 static int nfs41_test_stateid(struct nfs_server *server,
10010 nfs4_stateid *stateid,
10011 const struct cred *cred)
10012 {
10013 struct nfs4_exception exception = {
10014 .interruptible = true,
10015 };
10016 int err;
10017 do {
10018 err = _nfs41_test_stateid(server, stateid, cred);
10019 nfs4_handle_delay_or_session_error(server, err, &exception);
10020 } while (exception.retry);
10021 return err;
10022 }
10023
10024 struct nfs_free_stateid_data {
10025 struct nfs_server *server;
10026 struct nfs41_free_stateid_args args;
10027 struct nfs41_free_stateid_res res;
10028 };
10029
10030 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10031 {
10032 struct nfs_free_stateid_data *data = calldata;
10033 nfs4_setup_sequence(data->server->nfs_client,
10034 &data->args.seq_args,
10035 &data->res.seq_res,
10036 task);
10037 }
10038
10039 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10040 {
10041 struct nfs_free_stateid_data *data = calldata;
10042
10043 nfs41_sequence_done(task, &data->res.seq_res);
10044
10045 switch (task->tk_status) {
10046 case -NFS4ERR_DELAY:
10047 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10048 rpc_restart_call_prepare(task);
10049 }
10050 }
10051
10052 static void nfs41_free_stateid_release(void *calldata)
10053 {
10054 kfree(calldata);
10055 }
10056
10057 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10058 .rpc_call_prepare = nfs41_free_stateid_prepare,
10059 .rpc_call_done = nfs41_free_stateid_done,
10060 .rpc_release = nfs41_free_stateid_release,
10061 };
10062
10063 /**
10064 * nfs41_free_stateid - perform a FREE_STATEID operation
10065 *
10066 * @server: server / transport on which to perform the operation
10067 * @stateid: state ID to release
10068 * @cred: credential
10069 * @privileged: set to true if this call needs to be privileged
10070 *
10071 * Note: this function is always asynchronous.
10072 */
10073 static int nfs41_free_stateid(struct nfs_server *server,
10074 const nfs4_stateid *stateid,
10075 const struct cred *cred,
10076 bool privileged)
10077 {
10078 struct rpc_message msg = {
10079 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10080 .rpc_cred = cred,
10081 };
10082 struct rpc_task_setup task_setup = {
10083 .rpc_client = server->client,
10084 .rpc_message = &msg,
10085 .callback_ops = &nfs41_free_stateid_ops,
10086 .flags = RPC_TASK_ASYNC,
10087 };
10088 struct nfs_free_stateid_data *data;
10089 struct rpc_task *task;
10090
10091 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10092 &task_setup.rpc_client, &msg);
10093
10094 dprintk("NFS call free_stateid %p\n", stateid);
10095 data = kmalloc(sizeof(*data), GFP_NOFS);
10096 if (!data)
10097 return -ENOMEM;
10098 data->server = server;
10099 nfs4_stateid_copy(&data->args.stateid, stateid);
10100
10101 task_setup.callback_data = data;
10102
10103 msg.rpc_argp = &data->args;
10104 msg.rpc_resp = &data->res;
10105 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10106 task = rpc_run_task(&task_setup);
10107 if (IS_ERR(task))
10108 return PTR_ERR(task);
10109 rpc_put_task(task);
10110 return 0;
10111 }
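/*
 * Illustrative sketch, not part of the original source: the TEST_STATEID
 * and FREE_STATEID operations above are typically used together when
 * checking whether the server still knows about a stateid -- test it
 * first and, if the server no longer considers it valid, ask the server
 * to free it.  This hypothetical helper shows only the basic shape; the
 * real callers also inspect the specific error before freeing.
 */
static int __maybe_unused nfs41_example_test_and_free(struct nfs_server *server,
						      nfs4_stateid *stateid,
						      const struct cred *cred)
{
	int status = nfs41_test_stateid(server, stateid, cred);

	if (status != NFS_OK)
		nfs41_free_stateid(server, stateid, cred, true);
	return status;
}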
10112
10113 static void
10114 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10115 {
10116 const struct cred *cred = lsp->ls_state->owner->so_cred;
10117
10118 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10119 nfs4_free_lock_state(server, lsp);
10120 }
10121
10122 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10123 const nfs4_stateid *s2)
10124 {
10125 if (s1->type != s2->type)
10126 return false;
10127
10128 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10129 return false;
10130
10131 if (s1->seqid == s2->seqid)
10132 return true;
10133
10134 return s1->seqid == 0 || s2->seqid == 0;
10135 }
10136
10137 #endif /* CONFIG_NFS_V4_1 */
10138
10139 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10140 const nfs4_stateid *s2)
10141 {
10142 return nfs4_stateid_match(s1, s2);
10143 }
10144
10145
10146 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10147 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10148 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10149 .recover_open = nfs4_open_reclaim,
10150 .recover_lock = nfs4_lock_reclaim,
10151 .establish_clid = nfs4_init_clientid,
10152 .detect_trunking = nfs40_discover_server_trunking,
10153 };
10154
10155 #if defined(CONFIG_NFS_V4_1)
10156 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10157 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10158 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10159 .recover_open = nfs4_open_reclaim,
10160 .recover_lock = nfs4_lock_reclaim,
10161 .establish_clid = nfs41_init_clientid,
10162 .reclaim_complete = nfs41_proc_reclaim_complete,
10163 .detect_trunking = nfs41_discover_server_trunking,
10164 };
10165 #endif /* CONFIG_NFS_V4_1 */
10166
10167 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10168 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10169 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10170 .recover_open = nfs40_open_expired,
10171 .recover_lock = nfs4_lock_expired,
10172 .establish_clid = nfs4_init_clientid,
10173 };
10174
10175 #if defined(CONFIG_NFS_V4_1)
10176 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10177 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10178 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10179 .recover_open = nfs41_open_expired,
10180 .recover_lock = nfs41_lock_expired,
10181 .establish_clid = nfs41_init_clientid,
10182 };
10183 #endif /* CONFIG_NFS_V4_1 */
10184
10185 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10186 .sched_state_renewal = nfs4_proc_async_renew,
10187 .get_state_renewal_cred = nfs4_get_renew_cred,
10188 .renew_lease = nfs4_proc_renew,
10189 };
10190
10191 #if defined(CONFIG_NFS_V4_1)
10192 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10193 .sched_state_renewal = nfs41_proc_async_sequence,
10194 .get_state_renewal_cred = nfs4_get_machine_cred,
10195 .renew_lease = nfs4_proc_sequence,
10196 };
10197 #endif
10198
10199 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10200 .get_locations = _nfs40_proc_get_locations,
10201 .fsid_present = _nfs40_proc_fsid_present,
10202 };
10203
10204 #if defined(CONFIG_NFS_V4_1)
10205 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10206 .get_locations = _nfs41_proc_get_locations,
10207 .fsid_present = _nfs41_proc_fsid_present,
10208 };
10209 #endif /* CONFIG_NFS_V4_1 */
10210
10211 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10212 .minor_version = 0,
10213 .init_caps = NFS_CAP_READDIRPLUS
10214 | NFS_CAP_ATOMIC_OPEN
10215 | NFS_CAP_POSIX_LOCK,
10216 .init_client = nfs40_init_client,
10217 .shutdown_client = nfs40_shutdown_client,
10218 .match_stateid = nfs4_match_stateid,
10219 .find_root_sec = nfs4_find_root_sec,
10220 .free_lock_state = nfs4_release_lockowner,
10221 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
10222 .alloc_seqid = nfs_alloc_seqid,
10223 .call_sync_ops = &nfs40_call_sync_ops,
10224 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10225 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10226 .state_renewal_ops = &nfs40_state_renewal_ops,
10227 .mig_recovery_ops = &nfs40_mig_recovery_ops,
10228 };
10229
10230 #if defined(CONFIG_NFS_V4_1)
10231 static struct nfs_seqid *
10232 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10233 {
10234 return NULL;
10235 }
10236
10237 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10238 .minor_version = 1,
10239 .init_caps = NFS_CAP_READDIRPLUS
10240 | NFS_CAP_ATOMIC_OPEN
10241 | NFS_CAP_POSIX_LOCK
10242 | NFS_CAP_STATEID_NFSV41
10243 | NFS_CAP_ATOMIC_OPEN_V1
10244 | NFS_CAP_LGOPEN,
10245 .init_client = nfs41_init_client,
10246 .shutdown_client = nfs41_shutdown_client,
10247 .match_stateid = nfs41_match_stateid,
10248 .find_root_sec = nfs41_find_root_sec,
10249 .free_lock_state = nfs41_free_lock_state,
10250 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10251 .alloc_seqid = nfs_alloc_no_seqid,
10252 .session_trunk = nfs4_test_session_trunk,
10253 .call_sync_ops = &nfs41_call_sync_ops,
10254 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10255 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10256 .state_renewal_ops = &nfs41_state_renewal_ops,
10257 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10258 };
10259 #endif
10260
10261 #if defined(CONFIG_NFS_V4_2)
10262 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10263 .minor_version = 2,
10264 .init_caps = NFS_CAP_READDIRPLUS
10265 | NFS_CAP_ATOMIC_OPEN
10266 | NFS_CAP_POSIX_LOCK
10267 | NFS_CAP_STATEID_NFSV41
10268 | NFS_CAP_ATOMIC_OPEN_V1
10269 | NFS_CAP_LGOPEN
10270 | NFS_CAP_ALLOCATE
10271 | NFS_CAP_COPY
10272 | NFS_CAP_OFFLOAD_CANCEL
10273 | NFS_CAP_COPY_NOTIFY
10274 | NFS_CAP_DEALLOCATE
10275 | NFS_CAP_SEEK
10276 | NFS_CAP_LAYOUTSTATS
10277 | NFS_CAP_CLONE
10278 | NFS_CAP_LAYOUTERROR
10279 | NFS_CAP_READ_PLUS,
10280 .init_client = nfs41_init_client,
10281 .shutdown_client = nfs41_shutdown_client,
10282 .match_stateid = nfs41_match_stateid,
10283 .find_root_sec = nfs41_find_root_sec,
10284 .free_lock_state = nfs41_free_lock_state,
10285 .call_sync_ops = &nfs41_call_sync_ops,
10286 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10287 .alloc_seqid = nfs_alloc_no_seqid,
10288 .session_trunk = nfs4_test_session_trunk,
10289 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10290 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10291 .state_renewal_ops = &nfs41_state_renewal_ops,
10292 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10293 };
10294 #endif
10295
10296 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10297 [0] = &nfs_v4_0_minor_ops,
10298 #if defined(CONFIG_NFS_V4_1)
10299 [1] = &nfs_v4_1_minor_ops,
10300 #endif
10301 #if defined(CONFIG_NFS_V4_2)
10302 [2] = &nfs_v4_2_minor_ops,
10303 #endif
10304 };
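/*
 * Illustrative sketch, not part of the original source: mount/client setup
 * code picks one of the tables above by minor version number.  A
 * bounds-checked lookup might look like this hypothetical helper; an entry
 * can still be NULL when the matching CONFIG_NFS_V4_x option is disabled,
 * so callers must check the result.
 */
static __maybe_unused const struct nfs4_minor_version_ops *
nfs4_example_minor_ops(unsigned int minorversion)
{
	if (minorversion >= ARRAY_SIZE(nfs_v4_minor_ops))
		return NULL;
	return nfs_v4_minor_ops[minorversion];
}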
10305
10306 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10307 {
10308 ssize_t error, error2, error3;
10309
10310 error = generic_listxattr(dentry, list, size);
10311 if (error < 0)
10312 return error;
10313 if (list) {
10314 list += error;
10315 size -= error;
10316 }
10317
10318 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10319 if (error2 < 0)
10320 return error2;
10321
10322 if (list) {
10323 list += error2;
10324 size -= error2;
10325 }
10326
10327 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
10328 if (error3 < 0)
10329 return error3;
10330
10331 return error + error2 + error3;
10332 }
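/*
 * Illustrative sketch, not part of the original source: like other
 * listxattr implementations, nfs4_listxattr() supports the usual two-pass
 * calling convention -- passing a NULL list asks only for the buffer size
 * needed to hold all names.  A hypothetical caller probing for that size
 * would simply do:
 */
static ssize_t __maybe_unused nfs4_example_listxattr_size(struct dentry *dentry)
{
	/* First pass: NULL buffer, zero size, returns the required length. */
	return nfs4_listxattr(dentry, NULL, 0);
}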
10333
10334 static const struct inode_operations nfs4_dir_inode_operations = {
10335 .create = nfs_create,
10336 .lookup = nfs_lookup,
10337 .atomic_open = nfs_atomic_open,
10338 .link = nfs_link,
10339 .unlink = nfs_unlink,
10340 .symlink = nfs_symlink,
10341 .mkdir = nfs_mkdir,
10342 .rmdir = nfs_rmdir,
10343 .mknod = nfs_mknod,
10344 .rename = nfs_rename,
10345 .permission = nfs_permission,
10346 .getattr = nfs_getattr,
10347 .setattr = nfs_setattr,
10348 .listxattr = nfs4_listxattr,
10349 };
10350
10351 static const struct inode_operations nfs4_file_inode_operations = {
10352 .permission = nfs_permission,
10353 .getattr = nfs_getattr,
10354 .setattr = nfs_setattr,
10355 .listxattr = nfs4_listxattr,
10356 };
10357
10358 const struct nfs_rpc_ops nfs_v4_clientops = {
10359 .version = 4, /* protocol version */
10360 .dentry_ops = &nfs4_dentry_operations,
10361 .dir_inode_ops = &nfs4_dir_inode_operations,
10362 .file_inode_ops = &nfs4_file_inode_operations,
10363 .file_ops = &nfs4_file_operations,
10364 .getroot = nfs4_proc_get_root,
10365 .submount = nfs4_submount,
10366 .try_get_tree = nfs4_try_get_tree,
10367 .getattr = nfs4_proc_getattr,
10368 .setattr = nfs4_proc_setattr,
10369 .lookup = nfs4_proc_lookup,
10370 .lookupp = nfs4_proc_lookupp,
10371 .access = nfs4_proc_access,
10372 .readlink = nfs4_proc_readlink,
10373 .create = nfs4_proc_create,
10374 .remove = nfs4_proc_remove,
10375 .unlink_setup = nfs4_proc_unlink_setup,
10376 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10377 .unlink_done = nfs4_proc_unlink_done,
10378 .rename_setup = nfs4_proc_rename_setup,
10379 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10380 .rename_done = nfs4_proc_rename_done,
10381 .link = nfs4_proc_link,
10382 .symlink = nfs4_proc_symlink,
10383 .mkdir = nfs4_proc_mkdir,
10384 .rmdir = nfs4_proc_rmdir,
10385 .readdir = nfs4_proc_readdir,
10386 .mknod = nfs4_proc_mknod,
10387 .statfs = nfs4_proc_statfs,
10388 .fsinfo = nfs4_proc_fsinfo,
10389 .pathconf = nfs4_proc_pathconf,
10390 .set_capabilities = nfs4_server_capabilities,
10391 .decode_dirent = nfs4_decode_dirent,
10392 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10393 .read_setup = nfs4_proc_read_setup,
10394 .read_done = nfs4_read_done,
10395 .write_setup = nfs4_proc_write_setup,
10396 .write_done = nfs4_write_done,
10397 .commit_setup = nfs4_proc_commit_setup,
10398 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10399 .commit_done = nfs4_commit_done,
10400 .lock = nfs4_proc_lock,
10401 .clear_acl_cache = nfs4_zap_acl_attr,
10402 .close_context = nfs4_close_context,
10403 .open_context = nfs4_atomic_open,
10404 .have_delegation = nfs4_have_delegation,
10405 .alloc_client = nfs4_alloc_client,
10406 .init_client = nfs4_init_client,
10407 .free_client = nfs4_free_client,
10408 .create_server = nfs4_create_server,
10409 .clone_server = nfs_clone_server,
10410 };
10411
10412 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10413 .name = XATTR_NAME_NFSV4_ACL,
10414 .list = nfs4_xattr_list_nfs4_acl,
10415 .get = nfs4_xattr_get_nfs4_acl,
10416 .set = nfs4_xattr_set_nfs4_acl,
10417 };
10418
10419 #ifdef CONFIG_NFS_V4_2
10420 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10421 .prefix = XATTR_USER_PREFIX,
10422 .get = nfs4_xattr_get_nfs4_user,
10423 .set = nfs4_xattr_set_nfs4_user,
10424 };
10425 #endif
10426
10427 const struct xattr_handler *nfs4_xattr_handlers[] = {
10428 &nfs4_xattr_nfs4_acl_handler,
10429 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10430 &nfs4_xattr_nfs4_label_handler,
10431 #endif
10432 #ifdef CONFIG_NFS_V4_2
10433 &nfs4_xattr_nfs4_user_handler,
10434 #endif
10435 NULL
10436 };
10437
10438 /*
10439 * Local variables:
10440 * c-basic-offset: 8
10441 * End:
10442 */