1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71
72 #include "nfs4trace.h"
73
74 #define NFSDBG_FACILITY NFSDBG_PROC
75
76 #define NFS4_BITMASK_SZ 3
77
78 #define NFS4_POLL_RETRY_MIN (HZ/10)
79 #define NFS4_POLL_RETRY_MAX (15*HZ)
80
81 /* file attributes which can be mapped to nfs attributes */
82 #define NFS4_VALID_ATTRS (ATTR_MODE \
83 | ATTR_UID \
84 | ATTR_GID \
85 | ATTR_SIZE \
86 | ATTR_ATIME \
87 | ATTR_MTIME \
88 | ATTR_CTIME \
89 | ATTR_ATIME_SET \
90 | ATTR_MTIME_SET)
91
92 struct nfs4_opendata;
93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode);
97 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
98 struct nfs_fattr *fattr, struct iattr *sattr,
99 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
100 struct nfs4_label *olabel);
101 #ifdef CONFIG_NFS_V4_1
102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
103 const struct cred *cred,
104 struct nfs4_slot *slot,
105 bool is_privileged);
106 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
107 const struct cred *);
108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
109 const struct cred *, bool);
110 #endif
111 static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
112 const __u32 *src, struct inode *inode,
113 struct nfs_server *server,
114 struct nfs4_label *label);
115
116 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
117 static inline struct nfs4_label *
118 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
119 struct iattr *sattr, struct nfs4_label *label)
120 {
121 int err;
122
123 if (label == NULL)
124 return NULL;
125
126 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
127 return NULL;
128
129 err = security_dentry_init_security(dentry, sattr->ia_mode,
130 &dentry->d_name, (void **)&label->label, &label->len);
131 if (err == 0)
132 return label;
133
134 return NULL;
135 }
136 static inline void
137 nfs4_label_release_security(struct nfs4_label *label)
138 {
139 struct lsmcontext scaff; /* scaffolding */
140
141 if (label) {
142 lsmcontext_init(&scaff, label->label, label->len, 0);
143 security_release_secctx(&scaff);
144 }
145 }
146 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
147 {
148 if (label)
149 return server->attr_bitmask;
150
151 return server->attr_bitmask_nl;
152 }
153 #else
154 static inline struct nfs4_label *
155 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
156 struct iattr *sattr, struct nfs4_label *l)
157 { return NULL; }
158 static inline void
159 nfs4_label_release_security(struct nfs4_label *label)
160 { return; }
161 static inline u32 *
162 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
163 { return server->attr_bitmask; }
164 #endif
165
166 /* Prevent leaks of NFSv4 errors into userland */
167 static int nfs4_map_errors(int err)
168 {
169 if (err >= -1000)
170 return err;
171 switch (err) {
172 case -NFS4ERR_RESOURCE:
173 case -NFS4ERR_LAYOUTTRYLATER:
174 case -NFS4ERR_RECALLCONFLICT:
175 return -EREMOTEIO;
176 case -NFS4ERR_WRONGSEC:
177 case -NFS4ERR_WRONG_CRED:
178 return -EPERM;
179 case -NFS4ERR_BADOWNER:
180 case -NFS4ERR_BADNAME:
181 return -EINVAL;
182 case -NFS4ERR_SHARE_DENIED:
183 return -EACCES;
184 case -NFS4ERR_MINOR_VERS_MISMATCH:
185 return -EPROTONOSUPPORT;
186 case -NFS4ERR_FILE_OPEN:
187 return -EBUSY;
188 case -NFS4ERR_NOT_SAME:
189 return -ENOTSYNC;
190 default:
191 dprintk("%s could not handle NFSv4 error %d\n",
192 __func__, -err);
193 break;
194 }
195 return -EIO;
196 }
197
198 /*
199 * This is our standard bitmap for GETATTR requests.
200 */
201 const u32 nfs4_fattr_bitmap[3] = {
202 FATTR4_WORD0_TYPE
203 | FATTR4_WORD0_CHANGE
204 | FATTR4_WORD0_SIZE
205 | FATTR4_WORD0_FSID
206 | FATTR4_WORD0_FILEID,
207 FATTR4_WORD1_MODE
208 | FATTR4_WORD1_NUMLINKS
209 | FATTR4_WORD1_OWNER
210 | FATTR4_WORD1_OWNER_GROUP
211 | FATTR4_WORD1_RAWDEV
212 | FATTR4_WORD1_SPACE_USED
213 | FATTR4_WORD1_TIME_ACCESS
214 | FATTR4_WORD1_TIME_METADATA
215 | FATTR4_WORD1_TIME_MODIFY
216 | FATTR4_WORD1_MOUNTED_ON_FILEID,
217 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
218 FATTR4_WORD2_SECURITY_LABEL
219 #endif
220 };
221
222 static const u32 nfs4_pnfs_open_bitmap[3] = {
223 FATTR4_WORD0_TYPE
224 | FATTR4_WORD0_CHANGE
225 | FATTR4_WORD0_SIZE
226 | FATTR4_WORD0_FSID
227 | FATTR4_WORD0_FILEID,
228 FATTR4_WORD1_MODE
229 | FATTR4_WORD1_NUMLINKS
230 | FATTR4_WORD1_OWNER
231 | FATTR4_WORD1_OWNER_GROUP
232 | FATTR4_WORD1_RAWDEV
233 | FATTR4_WORD1_SPACE_USED
234 | FATTR4_WORD1_TIME_ACCESS
235 | FATTR4_WORD1_TIME_METADATA
236 | FATTR4_WORD1_TIME_MODIFY,
237 FATTR4_WORD2_MDSTHRESHOLD
238 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
239 | FATTR4_WORD2_SECURITY_LABEL
240 #endif
241 };
242
243 static const u32 nfs4_open_noattr_bitmap[3] = {
244 FATTR4_WORD0_TYPE
245 | FATTR4_WORD0_FILEID,
246 };
247
248 const u32 nfs4_statfs_bitmap[3] = {
249 FATTR4_WORD0_FILES_AVAIL
250 | FATTR4_WORD0_FILES_FREE
251 | FATTR4_WORD0_FILES_TOTAL,
252 FATTR4_WORD1_SPACE_AVAIL
253 | FATTR4_WORD1_SPACE_FREE
254 | FATTR4_WORD1_SPACE_TOTAL
255 };
256
257 const u32 nfs4_pathconf_bitmap[3] = {
258 FATTR4_WORD0_MAXLINK
259 | FATTR4_WORD0_MAXNAME,
260 0
261 };
262
263 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
264 | FATTR4_WORD0_MAXREAD
265 | FATTR4_WORD0_MAXWRITE
266 | FATTR4_WORD0_LEASE_TIME,
267 FATTR4_WORD1_TIME_DELTA
268 | FATTR4_WORD1_FS_LAYOUT_TYPES,
269 FATTR4_WORD2_LAYOUT_BLKSIZE
270 | FATTR4_WORD2_CLONE_BLKSIZE
271 | FATTR4_WORD2_CHANGE_ATTR_TYPE
272 | FATTR4_WORD2_XATTR_SUPPORT
273 };
274
275 const u32 nfs4_fs_locations_bitmap[3] = {
276 FATTR4_WORD0_CHANGE
277 | FATTR4_WORD0_SIZE
278 | FATTR4_WORD0_FSID
279 | FATTR4_WORD0_FILEID
280 | FATTR4_WORD0_FS_LOCATIONS,
281 FATTR4_WORD1_OWNER
282 | FATTR4_WORD1_OWNER_GROUP
283 | FATTR4_WORD1_RAWDEV
284 | FATTR4_WORD1_SPACE_USED
285 | FATTR4_WORD1_TIME_ACCESS
286 | FATTR4_WORD1_TIME_METADATA
287 | FATTR4_WORD1_TIME_MODIFY
288 | FATTR4_WORD1_MOUNTED_ON_FILEID,
289 };
290
291 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
292 struct inode *inode, unsigned long flags)
293 {
294 unsigned long cache_validity;
295
296 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
297 if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
298 return;
299
300 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
301
302 /* Remove the attributes over which we have full control */
303 dst[1] &= ~FATTR4_WORD1_RAWDEV;
304 if (!(cache_validity & NFS_INO_INVALID_SIZE))
305 dst[0] &= ~FATTR4_WORD0_SIZE;
306
307 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
308 dst[0] &= ~FATTR4_WORD0_CHANGE;
309
310 if (!(cache_validity & NFS_INO_INVALID_MODE))
311 dst[1] &= ~FATTR4_WORD1_MODE;
312 if (!(cache_validity & NFS_INO_INVALID_OTHER))
313 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
314 }
315
316 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
317 struct nfs4_readdir_arg *readdir)
318 {
319 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
320 __be32 *start, *p;
321
322 if (cookie > 2) {
323 readdir->cookie = cookie;
324 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
325 return;
326 }
327
328 readdir->cookie = 0;
329 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
330 if (cookie == 2)
331 return;
332
333 /*
334 * NFSv4 servers do not return entries for '.' and '..'
335 * Therefore, we fake these entries here. We let '.'
336 * have cookie 0 and '..' have cookie 1. Note that
337 * when talking to the server, we always send cookie 0
338 * instead of 1 or 2.
339 */
340 start = p = kmap_atomic(*readdir->pages);
341
342 if (cookie == 0) {
343 *p++ = xdr_one; /* next */
344 *p++ = xdr_zero; /* cookie, first word */
345 *p++ = xdr_one; /* cookie, second word */
346 *p++ = xdr_one; /* entry len */
347 memcpy(p, ".\0\0\0", 4); /* entry */
348 p++;
349 *p++ = xdr_one; /* bitmap length */
350 *p++ = htonl(attrs); /* bitmap */
351 *p++ = htonl(12); /* attribute buffer length */
352 *p++ = htonl(NF4DIR);
353 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
354 }
355
356 *p++ = xdr_one; /* next */
357 *p++ = xdr_zero; /* cookie, first word */
358 *p++ = xdr_two; /* cookie, second word */
359 *p++ = xdr_two; /* entry len */
360 memcpy(p, "..\0\0", 4); /* entry */
361 p++;
362 *p++ = xdr_one; /* bitmap length */
363 *p++ = htonl(attrs); /* bitmap */
364 *p++ = htonl(12); /* attribute buffer length */
365 *p++ = htonl(NF4DIR);
366 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
367
368 readdir->pgbase = (char *)p - (char *)start;
369 readdir->count -= readdir->pgbase;
370 kunmap_atomic(start);
371 }
372
373 static void nfs4_test_and_free_stateid(struct nfs_server *server,
374 nfs4_stateid *stateid,
375 const struct cred *cred)
376 {
377 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
378
379 ops->test_and_free_expired(server, stateid, cred);
380 }
381
382 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
383 nfs4_stateid *stateid,
384 const struct cred *cred)
385 {
386 stateid->type = NFS4_REVOKED_STATEID_TYPE;
387 nfs4_test_and_free_stateid(server, stateid, cred);
388 }
389
390 static void nfs4_free_revoked_stateid(struct nfs_server *server,
391 const nfs4_stateid *stateid,
392 const struct cred *cred)
393 {
394 nfs4_stateid tmp;
395
396 nfs4_stateid_copy(&tmp, stateid);
397 __nfs4_free_revoked_stateid(server, &tmp, cred);
398 }
399
400 static long nfs4_update_delay(long *timeout)
401 {
402 long ret;
403 if (!timeout)
404 return NFS4_POLL_RETRY_MAX;
405 if (*timeout <= 0)
406 *timeout = NFS4_POLL_RETRY_MIN;
407 if (*timeout > NFS4_POLL_RETRY_MAX)
408 *timeout = NFS4_POLL_RETRY_MAX;
409 ret = *timeout;
410 *timeout <<= 1;
411 return ret;
412 }
413
414 static int nfs4_delay_killable(long *timeout)
415 {
416 might_sleep();
417
418 freezable_schedule_timeout_killable_unsafe(
419 nfs4_update_delay(timeout));
420 if (!__fatal_signal_pending(current))
421 return 0;
422 return -EINTR;
423 }
424
425 static int nfs4_delay_interruptible(long *timeout)
426 {
427 might_sleep();
428
429 freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout));
430 if (!signal_pending(current))
431 return 0;
432 return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
433 }
434
435 static int nfs4_delay(long *timeout, bool interruptible)
436 {
437 if (interruptible)
438 return nfs4_delay_interruptible(timeout);
439 return nfs4_delay_killable(timeout);
440 }
441
442 static const nfs4_stateid *
443 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
444 {
445 if (!stateid)
446 return NULL;
447 switch (stateid->type) {
448 case NFS4_OPEN_STATEID_TYPE:
449 case NFS4_LOCK_STATEID_TYPE:
450 case NFS4_DELEGATION_STATEID_TYPE:
451 return stateid;
452 default:
453 break;
454 }
455 return NULL;
456 }
457
458 /* This is the error handling routine for processes that are allowed
459 * to sleep.
460 */
461 static int nfs4_do_handle_exception(struct nfs_server *server,
462 int errorcode, struct nfs4_exception *exception)
463 {
464 struct nfs_client *clp = server->nfs_client;
465 struct nfs4_state *state = exception->state;
466 const nfs4_stateid *stateid;
467 struct inode *inode = exception->inode;
468 int ret = errorcode;
469
470 exception->delay = 0;
471 exception->recovering = 0;
472 exception->retry = 0;
473
474 stateid = nfs4_recoverable_stateid(exception->stateid);
475 if (stateid == NULL && state != NULL)
476 stateid = nfs4_recoverable_stateid(&state->stateid);
477
478 switch (errorcode) {
479 case 0:
480 return 0;
481 case -NFS4ERR_BADHANDLE:
482 case -ESTALE:
483 if (inode != NULL && S_ISREG(inode->i_mode))
484 pnfs_destroy_layout(NFS_I(inode));
485 break;
486 case -NFS4ERR_DELEG_REVOKED:
487 case -NFS4ERR_ADMIN_REVOKED:
488 case -NFS4ERR_EXPIRED:
489 case -NFS4ERR_BAD_STATEID:
490 case -NFS4ERR_PARTNER_NO_AUTH:
491 if (inode != NULL && stateid != NULL) {
492 nfs_inode_find_state_and_recover(inode,
493 stateid);
494 goto wait_on_recovery;
495 }
496 fallthrough;
497 case -NFS4ERR_OPENMODE:
498 if (inode) {
499 int err;
500
501 err = nfs_async_inode_return_delegation(inode,
502 stateid);
503 if (err == 0)
504 goto wait_on_recovery;
505 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
506 exception->retry = 1;
507 break;
508 }
509 }
510 if (state == NULL)
511 break;
512 ret = nfs4_schedule_stateid_recovery(server, state);
513 if (ret < 0)
514 break;
515 goto wait_on_recovery;
516 case -NFS4ERR_STALE_STATEID:
517 case -NFS4ERR_STALE_CLIENTID:
518 nfs4_schedule_lease_recovery(clp);
519 goto wait_on_recovery;
520 case -NFS4ERR_MOVED:
521 ret = nfs4_schedule_migration_recovery(server);
522 if (ret < 0)
523 break;
524 goto wait_on_recovery;
525 case -NFS4ERR_LEASE_MOVED:
526 nfs4_schedule_lease_moved_recovery(clp);
527 goto wait_on_recovery;
528 #if defined(CONFIG_NFS_V4_1)
529 case -NFS4ERR_BADSESSION:
530 case -NFS4ERR_BADSLOT:
531 case -NFS4ERR_BAD_HIGH_SLOT:
532 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
533 case -NFS4ERR_DEADSESSION:
534 case -NFS4ERR_SEQ_FALSE_RETRY:
535 case -NFS4ERR_SEQ_MISORDERED:
536 /* Handled in nfs41_sequence_process() */
537 goto wait_on_recovery;
538 #endif /* defined(CONFIG_NFS_V4_1) */
539 case -NFS4ERR_FILE_OPEN:
540 if (exception->timeout > HZ) {
541 /* We have retried a decent amount, time to
542 * fail
543 */
544 ret = -EBUSY;
545 break;
546 }
547 fallthrough;
548 case -NFS4ERR_DELAY:
549 nfs_inc_server_stats(server, NFSIOS_DELAY);
550 fallthrough;
551 case -NFS4ERR_GRACE:
552 case -NFS4ERR_LAYOUTTRYLATER:
553 case -NFS4ERR_RECALLCONFLICT:
554 exception->delay = 1;
555 return 0;
556
557 case -NFS4ERR_RETRY_UNCACHED_REP:
558 case -NFS4ERR_OLD_STATEID:
559 exception->retry = 1;
560 break;
561 case -NFS4ERR_BADOWNER:
562 /* The following works around a Linux server bug! */
563 case -NFS4ERR_BADNAME:
564 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
565 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
566 exception->retry = 1;
567 printk(KERN_WARNING "NFS: v4 server %s "
568 "does not accept raw "
569 "uid/gids. "
570 "Reenabling the idmapper.\n",
571 server->nfs_client->cl_hostname);
572 }
573 }
574 /* We failed to handle the error */
575 return nfs4_map_errors(ret);
576 wait_on_recovery:
577 exception->recovering = 1;
578 return 0;
579 }
580
581 /* This is the error handling routine for processes that are allowed
582 * to sleep.
583 */
584 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
585 {
586 struct nfs_client *clp = server->nfs_client;
587 int ret;
588
589 ret = nfs4_do_handle_exception(server, errorcode, exception);
590 if (exception->delay) {
591 ret = nfs4_delay(&exception->timeout,
592 exception->interruptible);
593 goto out_retry;
594 }
595 if (exception->recovering) {
596 if (exception->task_is_privileged)
597 return -EDEADLOCK;
598 ret = nfs4_wait_clnt_recover(clp);
599 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
600 return -EIO;
601 goto out_retry;
602 }
603 return ret;
604 out_retry:
605 if (ret == 0)
606 exception->retry = 1;
607 return ret;
608 }
609
610 static int
611 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
612 int errorcode, struct nfs4_exception *exception)
613 {
614 struct nfs_client *clp = server->nfs_client;
615 int ret;
616
617 ret = nfs4_do_handle_exception(server, errorcode, exception);
618 if (exception->delay) {
619 rpc_delay(task, nfs4_update_delay(&exception->timeout));
620 goto out_retry;
621 }
622 if (exception->recovering) {
623 if (exception->task_is_privileged)
624 return -EDEADLOCK;
625 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
626 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
627 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
628 goto out_retry;
629 }
630 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
631 ret = -EIO;
632 return ret;
633 out_retry:
634 if (ret == 0) {
635 exception->retry = 1;
636 /*
637 * For NFS4ERR_MOVED, the client transport will need to
638 * be recomputed after migration recovery has completed.
639 */
640 if (errorcode == -NFS4ERR_MOVED)
641 rpc_task_release_transport(task);
642 }
643 return ret;
644 }
645
646 int
647 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
648 struct nfs4_state *state, long *timeout)
649 {
650 struct nfs4_exception exception = {
651 .state = state,
652 };
653
654 if (task->tk_status >= 0)
655 return 0;
656 if (timeout)
657 exception.timeout = *timeout;
658 task->tk_status = nfs4_async_handle_exception(task, server,
659 task->tk_status,
660 &exception);
661 if (exception.delay && timeout)
662 *timeout = exception.timeout;
663 if (exception.retry)
664 return -EAGAIN;
665 return 0;
666 }
667
668 /*
669 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
670 * or 'false' otherwise.
671 */
672 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
673 {
674 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
675 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
676 }
677
678 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
679 {
680 spin_lock(&clp->cl_lock);
681 if (time_before(clp->cl_last_renewal, timestamp))
682 clp->cl_last_renewal = timestamp;
683 spin_unlock(&clp->cl_lock);
684 }
685
686 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
687 {
688 struct nfs_client *clp = server->nfs_client;
689
690 if (!nfs4_has_session(clp))
691 do_renew_lease(clp, timestamp);
692 }
693
694 struct nfs4_call_sync_data {
695 const struct nfs_server *seq_server;
696 struct nfs4_sequence_args *seq_args;
697 struct nfs4_sequence_res *seq_res;
698 };
699
700 void nfs4_init_sequence(struct nfs4_sequence_args *args,
701 struct nfs4_sequence_res *res, int cache_reply,
702 int privileged)
703 {
704 args->sa_slot = NULL;
705 args->sa_cache_this = cache_reply;
706 args->sa_privileged = privileged;
707
708 res->sr_slot = NULL;
709 }
710
711 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
712 {
713 struct nfs4_slot *slot = res->sr_slot;
714 struct nfs4_slot_table *tbl;
715
716 tbl = slot->table;
717 spin_lock(&tbl->slot_tbl_lock);
718 if (!nfs41_wake_and_assign_slot(tbl, slot))
719 nfs4_free_slot(tbl, slot);
720 spin_unlock(&tbl->slot_tbl_lock);
721
722 res->sr_slot = NULL;
723 }
724
725 static int nfs40_sequence_done(struct rpc_task *task,
726 struct nfs4_sequence_res *res)
727 {
728 if (res->sr_slot != NULL)
729 nfs40_sequence_free_slot(res);
730 return 1;
731 }
732
733 #if defined(CONFIG_NFS_V4_1)
734
735 static void nfs41_release_slot(struct nfs4_slot *slot)
736 {
737 struct nfs4_session *session;
738 struct nfs4_slot_table *tbl;
739 bool send_new_highest_used_slotid = false;
740
741 if (!slot)
742 return;
743 tbl = slot->table;
744 session = tbl->session;
745
746 /* Bump the slot sequence number */
747 if (slot->seq_done)
748 slot->seq_nr++;
749 slot->seq_done = 0;
750
751 spin_lock(&tbl->slot_tbl_lock);
752 /* Be nice to the server: try to ensure that the last transmitted
753 * value for highest_used_slotid <= target_highest_slotid
754 */
755 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
756 send_new_highest_used_slotid = true;
757
758 if (nfs41_wake_and_assign_slot(tbl, slot)) {
759 send_new_highest_used_slotid = false;
760 goto out_unlock;
761 }
762 nfs4_free_slot(tbl, slot);
763
764 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
765 send_new_highest_used_slotid = false;
766 out_unlock:
767 spin_unlock(&tbl->slot_tbl_lock);
768 if (send_new_highest_used_slotid)
769 nfs41_notify_server(session->clp);
770 if (waitqueue_active(&tbl->slot_waitq))
771 wake_up_all(&tbl->slot_waitq);
772 }
773
774 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
775 {
776 nfs41_release_slot(res->sr_slot);
777 res->sr_slot = NULL;
778 }
779
780 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
781 u32 seqnr)
782 {
783 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
784 slot->seq_nr_highest_sent = seqnr;
785 }
786 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
787 u32 seqnr)
788 {
789 slot->seq_nr_highest_sent = seqnr;
790 slot->seq_nr_last_acked = seqnr;
791 }
792
793 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
794 struct nfs4_slot *slot)
795 {
796 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
797 if (!IS_ERR(task))
798 rpc_put_task_async(task);
799 }
800
801 static int nfs41_sequence_process(struct rpc_task *task,
802 struct nfs4_sequence_res *res)
803 {
804 struct nfs4_session *session;
805 struct nfs4_slot *slot = res->sr_slot;
806 struct nfs_client *clp;
807 int status;
808 int ret = 1;
809
810 if (slot == NULL)
811 goto out_noaction;
812 /* don't increment the sequence number if the task wasn't sent */
813 if (!RPC_WAS_SENT(task) || slot->seq_done)
814 goto out;
815
816 session = slot->table->session;
817 clp = session->clp;
818
819 trace_nfs4_sequence_done(session, res);
820
821 status = res->sr_status;
822 if (task->tk_status == -NFS4ERR_DEADSESSION)
823 status = -NFS4ERR_DEADSESSION;
824
825 /* Check the SEQUENCE operation status */
826 switch (status) {
827 case 0:
828 /* Mark this sequence number as having been acked */
829 nfs4_slot_sequence_acked(slot, slot->seq_nr);
830 /* Update the slot's sequence and clientid lease timer */
831 slot->seq_done = 1;
832 do_renew_lease(clp, res->sr_timestamp);
833 /* Check sequence flags */
834 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
835 !!slot->privileged);
836 nfs41_update_target_slotid(slot->table, slot, res);
837 break;
838 case 1:
839 /*
840 * sr_status remains 1 if an RPC level error occurred.
841 * The server may or may not have processed the sequence
842 * operation.
843 */
844 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
845 slot->seq_done = 1;
846 goto out;
847 case -NFS4ERR_DELAY:
848 /* The server detected a resend of the RPC call and
849 * returned NFS4ERR_DELAY as per Section 2.10.6.2
850 * of RFC5661.
851 */
852 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
853 __func__,
854 slot->slot_nr,
855 slot->seq_nr);
856 nfs4_slot_sequence_acked(slot, slot->seq_nr);
857 goto out_retry;
858 case -NFS4ERR_RETRY_UNCACHED_REP:
859 case -NFS4ERR_SEQ_FALSE_RETRY:
860 /*
861 * The server thinks we tried to replay a request.
862 * Retry the call after bumping the sequence ID.
863 */
864 nfs4_slot_sequence_acked(slot, slot->seq_nr);
865 goto retry_new_seq;
866 case -NFS4ERR_BADSLOT:
867 /*
868 * The slot id we used was probably retired. Try again
869 * using a different slot id.
870 */
871 if (slot->slot_nr < slot->table->target_highest_slotid)
872 goto session_recover;
873 goto retry_nowait;
874 case -NFS4ERR_SEQ_MISORDERED:
875 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
876 /*
877 * Were one or more calls using this slot interrupted?
878 * If the server never received the request, then our
879 * transmitted slot sequence number may be too high. However,
880 * if the server did receive the request then it might
881 * accidentally give us a reply with a mismatched operation.
882 * We can sort this out by sending a lone sequence operation
883 * to the server on the same slot.
884 */
885 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
886 slot->seq_nr--;
887 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
888 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
889 res->sr_slot = NULL;
890 }
891 goto retry_nowait;
892 }
893 /*
894 * RFC5661:
895 * A retry might be sent while the original request is
896 * still in progress on the replier. The replier SHOULD
897 * deal with the issue by returning NFS4ERR_DELAY as the
898 * reply to SEQUENCE or CB_SEQUENCE operation, but
899 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
900 *
901 * Restart the search after a delay.
902 */
903 slot->seq_nr = slot->seq_nr_highest_sent;
904 goto out_retry;
905 case -NFS4ERR_BADSESSION:
906 case -NFS4ERR_DEADSESSION:
907 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
908 goto session_recover;
909 default:
910 /* Just update the slot sequence no. */
911 slot->seq_done = 1;
912 }
913 out:
914 /* The session may be reset by one of the error handlers. */
915 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
916 out_noaction:
917 return ret;
918 session_recover:
919 nfs4_schedule_session_recovery(session, status);
920 dprintk("%s ERROR: %d Reset session\n", __func__, status);
921 nfs41_sequence_free_slot(res);
922 goto out;
923 retry_new_seq:
924 ++slot->seq_nr;
925 retry_nowait:
926 if (rpc_restart_call_prepare(task)) {
927 nfs41_sequence_free_slot(res);
928 task->tk_status = 0;
929 ret = 0;
930 }
931 goto out;
932 out_retry:
933 if (!rpc_restart_call(task))
934 goto out;
935 rpc_delay(task, NFS4_POLL_RETRY_MAX);
936 return 0;
937 }
938
939 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
940 {
941 if (!nfs41_sequence_process(task, res))
942 return 0;
943 if (res->sr_slot != NULL)
944 nfs41_sequence_free_slot(res);
945 return 1;
946
947 }
948 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
949
950 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
951 {
952 if (res->sr_slot == NULL)
953 return 1;
954 if (res->sr_slot->table->session != NULL)
955 return nfs41_sequence_process(task, res);
956 return nfs40_sequence_done(task, res);
957 }
958
959 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
960 {
961 if (res->sr_slot != NULL) {
962 if (res->sr_slot->table->session != NULL)
963 nfs41_sequence_free_slot(res);
964 else
965 nfs40_sequence_free_slot(res);
966 }
967 }
968
969 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
970 {
971 if (res->sr_slot == NULL)
972 return 1;
973 if (!res->sr_slot->table->session)
974 return nfs40_sequence_done(task, res);
975 return nfs41_sequence_done(task, res);
976 }
977 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
978
979 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
980 {
981 struct nfs4_call_sync_data *data = calldata;
982
983 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
984
985 nfs4_setup_sequence(data->seq_server->nfs_client,
986 data->seq_args, data->seq_res, task);
987 }
988
989 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
990 {
991 struct nfs4_call_sync_data *data = calldata;
992
993 nfs41_sequence_done(task, data->seq_res);
994 }
995
996 static const struct rpc_call_ops nfs41_call_sync_ops = {
997 .rpc_call_prepare = nfs41_call_sync_prepare,
998 .rpc_call_done = nfs41_call_sync_done,
999 };
1000
1001 #else /* !CONFIG_NFS_V4_1 */
1002
1003 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1004 {
1005 return nfs40_sequence_done(task, res);
1006 }
1007
1008 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1009 {
1010 if (res->sr_slot != NULL)
1011 nfs40_sequence_free_slot(res);
1012 }
1013
1014 int nfs4_sequence_done(struct rpc_task *task,
1015 struct nfs4_sequence_res *res)
1016 {
1017 return nfs40_sequence_done(task, res);
1018 }
1019 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1020
1021 #endif /* !CONFIG_NFS_V4_1 */
1022
1023 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1024 {
1025 res->sr_timestamp = jiffies;
1026 res->sr_status_flags = 0;
1027 res->sr_status = 1;
1028 }
1029
1030 static
1031 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1032 struct nfs4_sequence_res *res,
1033 struct nfs4_slot *slot)
1034 {
1035 if (!slot)
1036 return;
1037 slot->privileged = args->sa_privileged ? 1 : 0;
1038 args->sa_slot = slot;
1039
1040 res->sr_slot = slot;
1041 }
1042
1043 int nfs4_setup_sequence(struct nfs_client *client,
1044 struct nfs4_sequence_args *args,
1045 struct nfs4_sequence_res *res,
1046 struct rpc_task *task)
1047 {
1048 struct nfs4_session *session = nfs4_get_session(client);
1049 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
1050 struct nfs4_slot *slot;
1051
1052 /* slot already allocated? */
1053 if (res->sr_slot != NULL)
1054 goto out_start;
1055
1056 if (session)
1057 tbl = &session->fc_slot_table;
1058
1059 spin_lock(&tbl->slot_tbl_lock);
1060 /* The state manager will wait until the slot table is empty */
1061 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1062 goto out_sleep;
1063
1064 slot = nfs4_alloc_slot(tbl);
1065 if (IS_ERR(slot)) {
1066 if (slot == ERR_PTR(-ENOMEM))
1067 goto out_sleep_timeout;
1068 goto out_sleep;
1069 }
1070 spin_unlock(&tbl->slot_tbl_lock);
1071
1072 nfs4_sequence_attach_slot(args, res, slot);
1073
1074 trace_nfs4_setup_sequence(session, args);
1075 out_start:
1076 nfs41_sequence_res_init(res);
1077 rpc_call_start(task);
1078 return 0;
1079 out_sleep_timeout:
1080 /* Try again in 1/4 second */
1081 if (args->sa_privileged)
1082 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1083 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1084 else
1085 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1086 NULL, jiffies + (HZ >> 2));
1087 spin_unlock(&tbl->slot_tbl_lock);
1088 return -EAGAIN;
1089 out_sleep:
1090 if (args->sa_privileged)
1091 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1092 RPC_PRIORITY_PRIVILEGED);
1093 else
1094 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1095 spin_unlock(&tbl->slot_tbl_lock);
1096 return -EAGAIN;
1097 }
1098 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1099
1100 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1101 {
1102 struct nfs4_call_sync_data *data = calldata;
1103 nfs4_setup_sequence(data->seq_server->nfs_client,
1104 data->seq_args, data->seq_res, task);
1105 }
1106
1107 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1108 {
1109 struct nfs4_call_sync_data *data = calldata;
1110 nfs4_sequence_done(task, data->seq_res);
1111 }
1112
1113 static const struct rpc_call_ops nfs40_call_sync_ops = {
1114 .rpc_call_prepare = nfs40_call_sync_prepare,
1115 .rpc_call_done = nfs40_call_sync_done,
1116 };
1117
1118 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1119 {
1120 int ret;
1121 struct rpc_task *task;
1122
1123 task = rpc_run_task(task_setup);
1124 if (IS_ERR(task))
1125 return PTR_ERR(task);
1126
1127 ret = task->tk_status;
1128 rpc_put_task(task);
1129 return ret;
1130 }
1131
1132 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1133 struct nfs_server *server,
1134 struct rpc_message *msg,
1135 struct nfs4_sequence_args *args,
1136 struct nfs4_sequence_res *res,
1137 unsigned short task_flags)
1138 {
1139 struct nfs_client *clp = server->nfs_client;
1140 struct nfs4_call_sync_data data = {
1141 .seq_server = server,
1142 .seq_args = args,
1143 .seq_res = res,
1144 };
1145 struct rpc_task_setup task_setup = {
1146 .rpc_client = clnt,
1147 .rpc_message = msg,
1148 .callback_ops = clp->cl_mvops->call_sync_ops,
1149 .callback_data = &data,
1150 .flags = task_flags,
1151 };
1152
1153 return nfs4_call_sync_custom(&task_setup);
1154 }
1155
1156 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1157 struct nfs_server *server,
1158 struct rpc_message *msg,
1159 struct nfs4_sequence_args *args,
1160 struct nfs4_sequence_res *res)
1161 {
1162 unsigned short task_flags = 0;
1163
1164 if (server->nfs_client->cl_minorversion)
1165 task_flags = RPC_TASK_MOVEABLE;
1166 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
1167 }
1168
1169
1170 int nfs4_call_sync(struct rpc_clnt *clnt,
1171 struct nfs_server *server,
1172 struct rpc_message *msg,
1173 struct nfs4_sequence_args *args,
1174 struct nfs4_sequence_res *res,
1175 int cache_reply)
1176 {
1177 nfs4_init_sequence(args, res, cache_reply, 0);
1178 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1179 }
1180
1181 static void
1182 nfs4_inc_nlink_locked(struct inode *inode)
1183 {
1184 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1185 NFS_INO_INVALID_CTIME |
1186 NFS_INO_INVALID_NLINK);
1187 inc_nlink(inode);
1188 }
1189
1190 static void
1191 nfs4_inc_nlink(struct inode *inode)
1192 {
1193 spin_lock(&inode->i_lock);
1194 nfs4_inc_nlink_locked(inode);
1195 spin_unlock(&inode->i_lock);
1196 }
1197
1198 static void
1199 nfs4_dec_nlink_locked(struct inode *inode)
1200 {
1201 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1202 NFS_INO_INVALID_CTIME |
1203 NFS_INO_INVALID_NLINK);
1204 drop_nlink(inode);
1205 }
1206
1207 static void
1208 nfs4_update_changeattr_locked(struct inode *inode,
1209 struct nfs4_change_info *cinfo,
1210 unsigned long timestamp, unsigned long cache_validity)
1211 {
1212 struct nfs_inode *nfsi = NFS_I(inode);
1213 u64 change_attr = inode_peek_iversion_raw(inode);
1214
1215 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
1216 if (S_ISDIR(inode->i_mode))
1217 cache_validity |= NFS_INO_INVALID_DATA;
1218
1219 switch (NFS_SERVER(inode)->change_attr_type) {
1220 case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1221 if (cinfo->after == change_attr)
1222 goto out;
1223 break;
1224 default:
1225 if ((s64)(change_attr - cinfo->after) >= 0)
1226 goto out;
1227 }
1228
1229 inode_set_iversion_raw(inode, cinfo->after);
1230 if (!cinfo->atomic || cinfo->before != change_attr) {
1231 if (S_ISDIR(inode->i_mode))
1232 nfs_force_lookup_revalidate(inode);
1233
1234 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1235 cache_validity |=
1236 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
1237 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
1238 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
1239 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
1240 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1241 }
1242 nfsi->attrtimeo_timestamp = jiffies;
1243 nfsi->read_cache_jiffies = timestamp;
1244 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1245 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1246 out:
1247 nfs_set_cache_invalid(inode, cache_validity);
1248 }
1249
1250 void
1251 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1252 unsigned long timestamp, unsigned long cache_validity)
1253 {
1254 spin_lock(&dir->i_lock);
1255 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1256 spin_unlock(&dir->i_lock);
1257 }
1258
1259 struct nfs4_open_createattrs {
1260 struct nfs4_label *label;
1261 struct iattr *sattr;
1262 const __u32 verf[2];
1263 };
1264
1265 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1266 int err, struct nfs4_exception *exception)
1267 {
1268 if (err != -EINVAL)
1269 return false;
1270 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1271 return false;
1272 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1273 exception->retry = 1;
1274 return true;
1275 }
1276
1277 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1278 {
1279 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1280 }
1281
1282 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1283 {
1284 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1285
1286 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1287 }
1288
1289 static u32
1290 nfs4_map_atomic_open_share(struct nfs_server *server,
1291 fmode_t fmode, int openflags)
1292 {
1293 u32 res = 0;
1294
1295 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1296 case FMODE_READ:
1297 res = NFS4_SHARE_ACCESS_READ;
1298 break;
1299 case FMODE_WRITE:
1300 res = NFS4_SHARE_ACCESS_WRITE;
1301 break;
1302 case FMODE_READ|FMODE_WRITE:
1303 res = NFS4_SHARE_ACCESS_BOTH;
1304 }
1305 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1306 goto out;
1307 /* Want no delegation if we're using O_DIRECT */
1308 if (openflags & O_DIRECT)
1309 res |= NFS4_SHARE_WANT_NO_DELEG;
1310 out:
1311 return res;
1312 }
1313
1314 static enum open_claim_type4
1315 nfs4_map_atomic_open_claim(struct nfs_server *server,
1316 enum open_claim_type4 claim)
1317 {
1318 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1319 return claim;
1320 switch (claim) {
1321 default:
1322 return claim;
1323 case NFS4_OPEN_CLAIM_FH:
1324 return NFS4_OPEN_CLAIM_NULL;
1325 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1326 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1327 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1328 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1329 }
1330 }
1331
1332 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1333 {
1334 p->o_res.f_attr = &p->f_attr;
1335 p->o_res.f_label = p->f_label;
1336 p->o_res.seqid = p->o_arg.seqid;
1337 p->c_res.seqid = p->c_arg.seqid;
1338 p->o_res.server = p->o_arg.server;
1339 p->o_res.access_request = p->o_arg.access;
1340 nfs_fattr_init(&p->f_attr);
1341 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1342 }
1343
1344 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1345 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1346 const struct nfs4_open_createattrs *c,
1347 enum open_claim_type4 claim,
1348 gfp_t gfp_mask)
1349 {
1350 struct dentry *parent = dget_parent(dentry);
1351 struct inode *dir = d_inode(parent);
1352 struct nfs_server *server = NFS_SERVER(dir);
1353 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1354 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1355 struct nfs4_opendata *p;
1356
1357 p = kzalloc(sizeof(*p), gfp_mask);
1358 if (p == NULL)
1359 goto err;
1360
1361 p->f_label = nfs4_label_alloc(server, gfp_mask);
1362 if (IS_ERR(p->f_label))
1363 goto err_free_p;
1364
1365 p->a_label = nfs4_label_alloc(server, gfp_mask);
1366 if (IS_ERR(p->a_label))
1367 goto err_free_f;
1368
1369 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1370 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1371 if (IS_ERR(p->o_arg.seqid))
1372 goto err_free_label;
1373 nfs_sb_active(dentry->d_sb);
1374 p->dentry = dget(dentry);
1375 p->dir = parent;
1376 p->owner = sp;
1377 atomic_inc(&sp->so_count);
1378 p->o_arg.open_flags = flags;
1379 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1380 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1381 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1382 fmode, flags);
1383 if (flags & O_CREAT) {
1384 p->o_arg.umask = current_umask();
1385 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1386 if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1387 p->o_arg.u.attrs = &p->attrs;
1388 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1389
1390 memcpy(p->o_arg.u.verifier.data, c->verf,
1391 sizeof(p->o_arg.u.verifier.data));
1392 }
1393 }
1394 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1395 * will return permission denied for all bits until close */
1396 if (!(flags & O_EXCL)) {
1397 /* ask server to check for all possible rights as results
1398 * are cached */
1399 switch (p->o_arg.claim) {
1400 default:
1401 break;
1402 case NFS4_OPEN_CLAIM_NULL:
1403 case NFS4_OPEN_CLAIM_FH:
1404 p->o_arg.access = NFS4_ACCESS_READ |
1405 NFS4_ACCESS_MODIFY |
1406 NFS4_ACCESS_EXTEND |
1407 NFS4_ACCESS_EXECUTE;
1408 #ifdef CONFIG_NFS_V4_2
1409 if (server->caps & NFS_CAP_XATTR)
1410 p->o_arg.access |= NFS4_ACCESS_XAREAD |
1411 NFS4_ACCESS_XAWRITE |
1412 NFS4_ACCESS_XALIST;
1413 #endif
1414 }
1415 }
1416 p->o_arg.clientid = server->nfs_client->cl_clientid;
1417 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1418 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1419 p->o_arg.name = &dentry->d_name;
1420 p->o_arg.server = server;
1421 p->o_arg.bitmask = nfs4_bitmask(server, label);
1422 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1423 switch (p->o_arg.claim) {
1424 case NFS4_OPEN_CLAIM_NULL:
1425 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1426 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1427 p->o_arg.fh = NFS_FH(dir);
1428 break;
1429 case NFS4_OPEN_CLAIM_PREVIOUS:
1430 case NFS4_OPEN_CLAIM_FH:
1431 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1432 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1433 p->o_arg.fh = NFS_FH(d_inode(dentry));
1434 }
1435 p->c_arg.fh = &p->o_res.fh;
1436 p->c_arg.stateid = &p->o_res.stateid;
1437 p->c_arg.seqid = p->o_arg.seqid;
1438 nfs4_init_opendata_res(p);
1439 kref_init(&p->kref);
1440 return p;
1441
1442 err_free_label:
1443 nfs4_label_free(p->a_label);
1444 err_free_f:
1445 nfs4_label_free(p->f_label);
1446 err_free_p:
1447 kfree(p);
1448 err:
1449 dput(parent);
1450 return NULL;
1451 }
1452
1453 static void nfs4_opendata_free(struct kref *kref)
1454 {
1455 struct nfs4_opendata *p = container_of(kref,
1456 struct nfs4_opendata, kref);
1457 struct super_block *sb = p->dentry->d_sb;
1458
1459 nfs4_lgopen_release(p->lgp);
1460 nfs_free_seqid(p->o_arg.seqid);
1461 nfs4_sequence_free_slot(&p->o_res.seq_res);
1462 if (p->state != NULL)
1463 nfs4_put_open_state(p->state);
1464 nfs4_put_state_owner(p->owner);
1465
1466 nfs4_label_free(p->a_label);
1467 nfs4_label_free(p->f_label);
1468
1469 dput(p->dir);
1470 dput(p->dentry);
1471 nfs_sb_deactive(sb);
1472 nfs_fattr_free_names(&p->f_attr);
1473 kfree(p->f_attr.mdsthreshold);
1474 kfree(p);
1475 }
1476
1477 static void nfs4_opendata_put(struct nfs4_opendata *p)
1478 {
1479 if (p != NULL)
1480 kref_put(&p->kref, nfs4_opendata_free);
1481 }
1482
1483 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1484 fmode_t fmode)
1485 {
1486 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1487 case FMODE_READ|FMODE_WRITE:
1488 return state->n_rdwr != 0;
1489 case FMODE_WRITE:
1490 return state->n_wronly != 0;
1491 case FMODE_READ:
1492 return state->n_rdonly != 0;
1493 }
1494 WARN_ON_ONCE(1);
1495 return false;
1496 }
1497
1498 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1499 int open_mode, enum open_claim_type4 claim)
1500 {
1501 int ret = 0;
1502
1503 if (open_mode & (O_EXCL|O_TRUNC))
1504 goto out;
1505 switch (claim) {
1506 case NFS4_OPEN_CLAIM_NULL:
1507 case NFS4_OPEN_CLAIM_FH:
1508 goto out;
1509 default:
1510 break;
1511 }
1512 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1513 case FMODE_READ:
1514 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1515 && state->n_rdonly != 0;
1516 break;
1517 case FMODE_WRITE:
1518 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1519 && state->n_wronly != 0;
1520 break;
1521 case FMODE_READ|FMODE_WRITE:
1522 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1523 && state->n_rdwr != 0;
1524 }
1525 out:
1526 return ret;
1527 }
1528
1529 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1530 enum open_claim_type4 claim)
1531 {
1532 if (delegation == NULL)
1533 return 0;
1534 if ((delegation->type & fmode) != fmode)
1535 return 0;
1536 switch (claim) {
1537 case NFS4_OPEN_CLAIM_NULL:
1538 case NFS4_OPEN_CLAIM_FH:
1539 break;
1540 case NFS4_OPEN_CLAIM_PREVIOUS:
1541 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1542 break;
1543 fallthrough;
1544 default:
1545 return 0;
1546 }
1547 nfs_mark_delegation_referenced(delegation);
1548 return 1;
1549 }
1550
1551 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1552 {
1553 switch (fmode) {
1554 case FMODE_WRITE:
1555 state->n_wronly++;
1556 break;
1557 case FMODE_READ:
1558 state->n_rdonly++;
1559 break;
1560 case FMODE_READ|FMODE_WRITE:
1561 state->n_rdwr++;
1562 }
1563 nfs4_state_set_mode_locked(state, state->state | fmode);
1564 }
1565
1566 #ifdef CONFIG_NFS_V4_1
1567 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1568 {
1569 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1570 return true;
1571 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1572 return true;
1573 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1574 return true;
1575 return false;
1576 }
1577 #endif /* CONFIG_NFS_V4_1 */
1578
1579 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1580 {
1581 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1582 wake_up_all(&state->waitq);
1583 }
1584
1585 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1586 {
1587 struct nfs_client *clp = state->owner->so_server->nfs_client;
1588 bool need_recover = false;
1589
1590 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1591 need_recover = true;
1592 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1593 need_recover = true;
1594 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1595 need_recover = true;
1596 if (need_recover)
1597 nfs4_state_mark_reclaim_nograce(clp, state);
1598 }
1599
1600 /*
1601 * Check whether the caller may update the open stateid
1602 * to the value passed in by stateid.
1603 *
1604 * Note: This function relies heavily on the server implementing
1605 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1606 * correctly.
1607 * i.e. the stateid seqids have to be initialised to 1, and
1608 * are then incremented on every state transition.
1609 */
1610 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1611 const nfs4_stateid *stateid)
1612 {
1613 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1614 /* The common case - we're updating to a new sequence number */
1615 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1616 if (nfs4_stateid_is_next(&state->open_stateid, stateid))
1617 return true;
1618 return false;
1619 }
1620 /* The server returned a new stateid */
1621 }
1622 /* This is the first OPEN in this generation */
1623 if (stateid->seqid == cpu_to_be32(1))
1624 return true;
1625 return false;
1626 }
1627
1628 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1629 {
1630 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1631 return;
1632 if (state->n_wronly)
1633 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1634 if (state->n_rdonly)
1635 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1636 if (state->n_rdwr)
1637 set_bit(NFS_O_RDWR_STATE, &state->flags);
1638 set_bit(NFS_OPEN_STATE, &state->flags);
1639 }
1640
1641 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1642 nfs4_stateid *stateid, fmode_t fmode)
1643 {
1644 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1645 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1646 case FMODE_WRITE:
1647 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1648 break;
1649 case FMODE_READ:
1650 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1651 break;
1652 case 0:
1653 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1654 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1655 clear_bit(NFS_OPEN_STATE, &state->flags);
1656 }
1657 if (stateid == NULL)
1658 return;
1659 /* Handle OPEN+OPEN_DOWNGRADE races */
1660 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1661 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1662 nfs_resync_open_stateid_locked(state);
1663 goto out;
1664 }
1665 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1666 nfs4_stateid_copy(&state->stateid, stateid);
1667 nfs4_stateid_copy(&state->open_stateid, stateid);
1668 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1669 out:
1670 nfs_state_log_update_open_stateid(state);
1671 }
1672
1673 static void nfs_clear_open_stateid(struct nfs4_state *state,
1674 nfs4_stateid *arg_stateid,
1675 nfs4_stateid *stateid, fmode_t fmode)
1676 {
1677 write_seqlock(&state->seqlock);
1678 /* Ignore if the CLOSE argument doesn't match the current stateid */
1679 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1680 nfs_clear_open_stateid_locked(state, stateid, fmode);
1681 write_sequnlock(&state->seqlock);
1682 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1683 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1684 }
1685
1686 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1687 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1688 __must_hold(&state->owner->so_lock)
1689 __must_hold(&state->seqlock)
1690 __must_hold(RCU)
1691
1692 {
1693 DEFINE_WAIT(wait);
1694 int status = 0;
1695 for (;;) {
1696
1697 if (nfs_stateid_is_sequential(state, stateid))
1698 break;
1699
1700 if (status)
1701 break;
1702 /* Rely on seqids for serialisation with NFSv4.0 */
1703 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1704 break;
1705
1706 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1707 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1708 /*
1709 * Ensure we process the state changes in the same order
1710 * in which the server processed them by delaying the
1711 * update of the stateid until we are in sequence.
1712 */
1713 write_sequnlock(&state->seqlock);
1714 spin_unlock(&state->owner->so_lock);
1715 rcu_read_unlock();
1716 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1717
1718 if (!fatal_signal_pending(current)) {
1719 if (schedule_timeout(5*HZ) == 0)
1720 status = -EAGAIN;
1721 else
1722 status = 0;
1723 } else
1724 status = -EINTR;
1725 finish_wait(&state->waitq, &wait);
1726 rcu_read_lock();
1727 spin_lock(&state->owner->so_lock);
1728 write_seqlock(&state->seqlock);
1729 }
1730
1731 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1732 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1733 nfs4_stateid_copy(freeme, &state->open_stateid);
1734 nfs_test_and_clear_all_open_stateid(state);
1735 }
1736
1737 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1738 nfs4_stateid_copy(&state->stateid, stateid);
1739 nfs4_stateid_copy(&state->open_stateid, stateid);
1740 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1741 nfs_state_log_update_open_stateid(state);
1742 }
1743
1744 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1745 const nfs4_stateid *open_stateid,
1746 fmode_t fmode,
1747 nfs4_stateid *freeme)
1748 {
1749 /*
1750 * Protect the call to nfs4_state_set_mode_locked and
1751 * serialise the stateid update
1752 */
1753 write_seqlock(&state->seqlock);
1754 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1755 switch (fmode) {
1756 case FMODE_READ:
1757 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1758 break;
1759 case FMODE_WRITE:
1760 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1761 break;
1762 case FMODE_READ|FMODE_WRITE:
1763 set_bit(NFS_O_RDWR_STATE, &state->flags);
1764 }
1765 set_bit(NFS_OPEN_STATE, &state->flags);
1766 write_sequnlock(&state->seqlock);
1767 }
1768
1769 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1770 {
1771 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1772 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1773 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1774 clear_bit(NFS_OPEN_STATE, &state->flags);
1775 }
1776
1777 static void nfs_state_set_delegation(struct nfs4_state *state,
1778 const nfs4_stateid *deleg_stateid,
1779 fmode_t fmode)
1780 {
1781 /*
1782 * Protect the call to nfs4_state_set_mode_locked and
1783 * serialise the stateid update
1784 */
1785 write_seqlock(&state->seqlock);
1786 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1787 set_bit(NFS_DELEGATED_STATE, &state->flags);
1788 write_sequnlock(&state->seqlock);
1789 }
1790
1791 static void nfs_state_clear_delegation(struct nfs4_state *state)
1792 {
1793 write_seqlock(&state->seqlock);
1794 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1795 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1796 write_sequnlock(&state->seqlock);
1797 }
1798
1799 int update_open_stateid(struct nfs4_state *state,
1800 const nfs4_stateid *open_stateid,
1801 const nfs4_stateid *delegation,
1802 fmode_t fmode)
1803 {
1804 struct nfs_server *server = NFS_SERVER(state->inode);
1805 struct nfs_client *clp = server->nfs_client;
1806 struct nfs_inode *nfsi = NFS_I(state->inode);
1807 struct nfs_delegation *deleg_cur;
1808 nfs4_stateid freeme = { };
1809 int ret = 0;
1810
1811 fmode &= (FMODE_READ|FMODE_WRITE);
1812
1813 rcu_read_lock();
1814 spin_lock(&state->owner->so_lock);
1815 if (open_stateid != NULL) {
1816 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1817 ret = 1;
1818 }
1819
1820 deleg_cur = nfs4_get_valid_delegation(state->inode);
1821 if (deleg_cur == NULL)
1822 goto no_delegation;
1823
1824 spin_lock(&deleg_cur->lock);
1825 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1826 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1827 (deleg_cur->type & fmode) != fmode)
1828 goto no_delegation_unlock;
1829
1830 if (delegation == NULL)
1831 delegation = &deleg_cur->stateid;
1832 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1833 goto no_delegation_unlock;
1834
1835 nfs_mark_delegation_referenced(deleg_cur);
1836 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1837 ret = 1;
1838 no_delegation_unlock:
1839 spin_unlock(&deleg_cur->lock);
1840 no_delegation:
1841 if (ret)
1842 update_open_stateflags(state, fmode);
1843 spin_unlock(&state->owner->so_lock);
1844 rcu_read_unlock();
1845
1846 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1847 nfs4_schedule_state_manager(clp);
1848 if (freeme.type != 0)
1849 nfs4_test_and_free_stateid(server, &freeme,
1850 state->owner->so_cred);
1851
1852 return ret;
1853 }
1854
1855 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1856 const nfs4_stateid *stateid)
1857 {
1858 struct nfs4_state *state = lsp->ls_state;
1859 bool ret = false;
1860
1861 spin_lock(&state->state_lock);
1862 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1863 goto out_noupdate;
1864 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1865 goto out_noupdate;
1866 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1867 ret = true;
1868 out_noupdate:
1869 spin_unlock(&state->state_lock);
1870 return ret;
1871 }
1872
1873 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1874 {
1875 struct nfs_delegation *delegation;
1876
1877 fmode &= FMODE_READ|FMODE_WRITE;
1878 rcu_read_lock();
1879 delegation = nfs4_get_valid_delegation(inode);
1880 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1881 rcu_read_unlock();
1882 return;
1883 }
1884 rcu_read_unlock();
1885 nfs4_inode_return_delegation(inode);
1886 }
1887
1888 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1889 {
1890 struct nfs4_state *state = opendata->state;
1891 struct nfs_delegation *delegation;
1892 int open_mode = opendata->o_arg.open_flags;
1893 fmode_t fmode = opendata->o_arg.fmode;
1894 enum open_claim_type4 claim = opendata->o_arg.claim;
1895 nfs4_stateid stateid;
1896 int ret = -EAGAIN;
1897
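/*
 * First try to reuse an existing open stateid for the requested share
 * mode; failing that, try to open using a delegation, retrying if the
 * delegation-based stateid update races with a delegation return.
 */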
1898 for (;;) {
1899 spin_lock(&state->owner->so_lock);
1900 if (can_open_cached(state, fmode, open_mode, claim)) {
1901 update_open_stateflags(state, fmode);
1902 spin_unlock(&state->owner->so_lock);
1903 goto out_return_state;
1904 }
1905 spin_unlock(&state->owner->so_lock);
1906 rcu_read_lock();
1907 delegation = nfs4_get_valid_delegation(state->inode);
1908 if (!can_open_delegated(delegation, fmode, claim)) {
1909 rcu_read_unlock();
1910 break;
1911 }
1912 /* Save the delegation */
1913 nfs4_stateid_copy(&stateid, &delegation->stateid);
1914 rcu_read_unlock();
1915 nfs_release_seqid(opendata->o_arg.seqid);
1916 if (!opendata->is_recover) {
1917 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1918 if (ret != 0)
1919 goto out;
1920 }
1921 ret = -EAGAIN;
1922
1923 /* Try to update the stateid using the delegation */
1924 if (update_open_stateid(state, NULL, &stateid, fmode))
1925 goto out_return_state;
1926 }
1927 out:
1928 return ERR_PTR(ret);
1929 out_return_state:
1930 refcount_inc(&state->count);
1931 return state;
1932 }
1933
1934 static void
1935 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1936 {
1937 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1938 struct nfs_delegation *delegation;
1939 int delegation_flags = 0;
1940
1941 rcu_read_lock();
1942 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1943 if (delegation)
1944 delegation_flags = delegation->flags;
1945 rcu_read_unlock();
1946 switch (data->o_arg.claim) {
1947 default:
1948 break;
1949 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1950 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1951 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1952 "returning a delegation for "
1953 "OPEN(CLAIM_DELEGATE_CUR)\n",
1954 clp->cl_hostname);
1955 return;
1956 }
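/*
 * Record the delegation the server just granted; if an existing
 * delegation is marked for reclaim after a server reboot, update it in
 * place instead.
 */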
1957 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1958 nfs_inode_set_delegation(state->inode,
1959 data->owner->so_cred,
1960 data->o_res.delegation_type,
1961 &data->o_res.delegation,
1962 data->o_res.pagemod_limit);
1963 else
1964 nfs_inode_reclaim_delegation(state->inode,
1965 data->owner->so_cred,
1966 data->o_res.delegation_type,
1967 &data->o_res.delegation,
1968 data->o_res.pagemod_limit);
1969
1970 if (data->o_res.do_recall)
1971 nfs_async_inode_return_delegation(state->inode,
1972 &data->o_res.delegation);
1973 }
1974
1975 /*
1976 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1977 * and update the nfs4_state.
1978 */
1979 static struct nfs4_state *
1980 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1981 {
1982 struct inode *inode = data->state->inode;
1983 struct nfs4_state *state = data->state;
1984 int ret;
1985
1986 if (!data->rpc_done) {
1987 if (data->rpc_status)
1988 return ERR_PTR(data->rpc_status);
1989 /* cached opens have already been processed */
1990 goto update;
1991 }
1992
1993 ret = nfs_refresh_inode(inode, &data->f_attr);
1994 if (ret)
1995 return ERR_PTR(ret);
1996
1997 if (data->o_res.delegation_type != 0)
1998 nfs4_opendata_check_deleg(data, state);
1999 update:
2000 if (!update_open_stateid(state, &data->o_res.stateid,
2001 NULL, data->o_arg.fmode))
2002 return ERR_PTR(-EAGAIN);
2003 refcount_inc(&state->count);
2004
2005 return state;
2006 }
2007
2008 static struct inode *
2009 nfs4_opendata_get_inode(struct nfs4_opendata *data)
2010 {
2011 struct inode *inode;
2012
2013 switch (data->o_arg.claim) {
2014 case NFS4_OPEN_CLAIM_NULL:
2015 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2016 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
2017 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
2018 return ERR_PTR(-EAGAIN);
2019 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
2020 &data->f_attr, data->f_label);
2021 break;
2022 default:
2023 inode = d_inode(data->dentry);
2024 ihold(inode);
2025 nfs_refresh_inode(inode, &data->f_attr);
2026 }
2027 return inode;
2028 }
2029
2030 static struct nfs4_state *
2031 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2032 {
2033 struct nfs4_state *state;
2034 struct inode *inode;
2035
2036 inode = nfs4_opendata_get_inode(data);
2037 if (IS_ERR(inode))
2038 return ERR_CAST(inode);
2039 if (data->state != NULL && data->state->inode == inode) {
2040 state = data->state;
2041 refcount_inc(&state->count);
2042 } else
2043 state = nfs4_get_open_state(inode, data->owner);
2044 iput(inode);
2045 if (state == NULL)
2046 state = ERR_PTR(-ENOMEM);
2047 return state;
2048 }
2049
2050 static struct nfs4_state *
2051 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2052 {
2053 struct nfs4_state *state;
2054
2055 if (!data->rpc_done) {
2056 state = nfs4_try_open_cached(data);
2057 trace_nfs4_cached_open(data->state);
2058 goto out;
2059 }
2060
2061 state = nfs4_opendata_find_nfs4_state(data);
2062 if (IS_ERR(state))
2063 goto out;
2064
2065 if (data->o_res.delegation_type != 0)
2066 nfs4_opendata_check_deleg(data, state);
2067 if (!update_open_stateid(state, &data->o_res.stateid,
2068 NULL, data->o_arg.fmode)) {
2069 nfs4_put_open_state(state);
2070 state = ERR_PTR(-EAGAIN);
2071 }
2072 out:
2073 nfs_release_seqid(data->o_arg.seqid);
2074 return state;
2075 }
2076
2077 static struct nfs4_state *
2078 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2079 {
2080 struct nfs4_state *ret;
2081
2082 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2083 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2084 else
2085 ret = _nfs4_opendata_to_nfs4_state(data);
2086 nfs4_sequence_free_slot(&data->o_res.seq_res);
2087 return ret;
2088 }
2089
2090 static struct nfs_open_context *
2091 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2092 {
2093 struct nfs_inode *nfsi = NFS_I(state->inode);
2094 struct nfs_open_context *ctx;
2095
2096 rcu_read_lock();
2097 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2098 if (ctx->state != state)
2099 continue;
2100 if ((ctx->mode & mode) != mode)
2101 continue;
2102 if (!get_nfs_open_context(ctx))
2103 continue;
2104 rcu_read_unlock();
2105 return ctx;
2106 }
2107 rcu_read_unlock();
2108 return ERR_PTR(-ENOENT);
2109 }
2110
2111 static struct nfs_open_context *
2112 nfs4_state_find_open_context(struct nfs4_state *state)
2113 {
2114 struct nfs_open_context *ctx;
2115
2116 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2117 if (!IS_ERR(ctx))
2118 return ctx;
2119 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2120 if (!IS_ERR(ctx))
2121 return ctx;
2122 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2123 }
2124
2125 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2126 struct nfs4_state *state, enum open_claim_type4 claim)
2127 {
2128 struct nfs4_opendata *opendata;
2129
2130 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2131 NULL, claim, GFP_NOFS);
2132 if (opendata == NULL)
2133 return ERR_PTR(-ENOMEM);
2134 opendata->state = state;
2135 refcount_inc(&state->count);
2136 return opendata;
2137 }
2138
2139 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2140 fmode_t fmode)
2141 {
2142 struct nfs4_state *newstate;
2143 int ret;
2144
2145 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2146 return 0;
2147 opendata->o_arg.open_flags = 0;
2148 opendata->o_arg.fmode = fmode;
2149 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
2150 NFS_SB(opendata->dentry->d_sb),
2151 fmode, 0);
2152 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2153 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2154 nfs4_init_opendata_res(opendata);
2155 ret = _nfs4_recover_proc_open(opendata);
2156 if (ret != 0)
2157 return ret;
2158 newstate = nfs4_opendata_to_nfs4_state(opendata);
2159 if (IS_ERR(newstate))
2160 return PTR_ERR(newstate);
2161 if (newstate != opendata->state)
2162 ret = -ESTALE;
2163 nfs4_close_state(newstate, fmode);
2164 return ret;
2165 }
2166
2167 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2168 {
2169 int ret;
2170
2171 /* memory barrier prior to reading state->n_* */
2172 smp_rmb();
2173 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2174 if (ret != 0)
2175 return ret;
2176 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2177 if (ret != 0)
2178 return ret;
2179 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2180 if (ret != 0)
2181 return ret;
2182 /*
2183 * We may have performed cached opens for all three recoveries.
2184 * Check if we need to update the current stateid.
2185 */
2186 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2187 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2188 write_seqlock(&state->seqlock);
2189 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2190 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2191 write_sequnlock(&state->seqlock);
2192 }
2193 return 0;
2194 }
2195
2196 /*
2197 * OPEN_RECLAIM:
2198 * reclaim state on the server after a reboot.
2199 */
2200 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2201 {
2202 struct nfs_delegation *delegation;
2203 struct nfs4_opendata *opendata;
2204 fmode_t delegation_type = 0;
2205 int status;
2206
2207 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2208 NFS4_OPEN_CLAIM_PREVIOUS);
2209 if (IS_ERR(opendata))
2210 return PTR_ERR(opendata);
2211 rcu_read_lock();
2212 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2213 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2214 delegation_type = delegation->type;
2215 rcu_read_unlock();
2216 opendata->o_arg.u.delegation_type = delegation_type;
2217 status = nfs4_open_recover(opendata, state);
2218 nfs4_opendata_put(opendata);
2219 return status;
2220 }
2221
2222 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2223 {
2224 struct nfs_server *server = NFS_SERVER(state->inode);
2225 struct nfs4_exception exception = { };
2226 int err;
2227 do {
2228 err = _nfs4_do_open_reclaim(ctx, state);
2229 trace_nfs4_open_reclaim(ctx, 0, err);
2230 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2231 continue;
2232 if (err != -NFS4ERR_DELAY)
2233 break;
2234 nfs4_handle_exception(server, err, &exception);
2235 } while (exception.retry);
2236 return err;
2237 }
2238
2239 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2240 {
2241 struct nfs_open_context *ctx;
2242 int ret;
2243
2244 ctx = nfs4_state_find_open_context(state);
2245 if (IS_ERR(ctx))
2246 return -EAGAIN;
2247 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2248 nfs_state_clear_open_state_flags(state);
2249 ret = nfs4_do_open_reclaim(ctx, state);
2250 put_nfs_open_context(ctx);
2251 return ret;
2252 }
2253
2254 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2255 {
2256 switch (err) {
2257 default:
2258 printk(KERN_ERR "NFS: %s: unhandled error "
2259 "%d.\n", __func__, err);
2260 fallthrough;
2261 case 0:
2262 case -ENOENT:
2263 case -EAGAIN:
2264 case -ESTALE:
2265 case -ETIMEDOUT:
2266 break;
2267 case -NFS4ERR_BADSESSION:
2268 case -NFS4ERR_BADSLOT:
2269 case -NFS4ERR_BAD_HIGH_SLOT:
2270 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2271 case -NFS4ERR_DEADSESSION:
2272 return -EAGAIN;
2273 case -NFS4ERR_STALE_CLIENTID:
2274 case -NFS4ERR_STALE_STATEID:
2275 /* Don't recall a delegation if it was lost */
2276 nfs4_schedule_lease_recovery(server->nfs_client);
2277 return -EAGAIN;
2278 case -NFS4ERR_MOVED:
2279 nfs4_schedule_migration_recovery(server);
2280 return -EAGAIN;
2281 case -NFS4ERR_LEASE_MOVED:
2282 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2283 return -EAGAIN;
2284 case -NFS4ERR_DELEG_REVOKED:
2285 case -NFS4ERR_ADMIN_REVOKED:
2286 case -NFS4ERR_EXPIRED:
2287 case -NFS4ERR_BAD_STATEID:
2288 case -NFS4ERR_OPENMODE:
2289 nfs_inode_find_state_and_recover(state->inode,
2290 stateid);
2291 nfs4_schedule_stateid_recovery(server, state);
2292 return -EAGAIN;
2293 case -NFS4ERR_DELAY:
2294 case -NFS4ERR_GRACE:
2295 ssleep(1);
2296 return -EAGAIN;
2297 case -ENOMEM:
2298 case -NFS4ERR_DENIED:
2299 if (fl) {
2300 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2301 if (lsp)
2302 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2303 }
2304 return 0;
2305 }
2306 return err;
2307 }
2308
2309 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2310 struct nfs4_state *state, const nfs4_stateid *stateid)
2311 {
2312 struct nfs_server *server = NFS_SERVER(state->inode);
2313 struct nfs4_opendata *opendata;
2314 int err = 0;
2315
2316 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2317 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2318 if (IS_ERR(opendata))
2319 return PTR_ERR(opendata);
2320 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2321 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2322 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2323 if (err)
2324 goto out;
2325 }
2326 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2327 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2328 if (err)
2329 goto out;
2330 }
2331 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2332 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2333 if (err)
2334 goto out;
2335 }
2336 nfs_state_clear_delegation(state);
2337 out:
2338 nfs4_opendata_put(opendata);
2339 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2340 }
2341
2342 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2343 {
2344 struct nfs4_opendata *data = calldata;
2345
2346 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2347 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2348 }
2349
2350 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2351 {
2352 struct nfs4_opendata *data = calldata;
2353
2354 nfs40_sequence_done(task, &data->c_res.seq_res);
2355
2356 data->rpc_status = task->tk_status;
2357 if (data->rpc_status == 0) {
2358 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2359 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2360 renew_lease(data->o_res.server, data->timestamp);
2361 data->rpc_done = true;
2362 }
2363 }
2364
2365 static void nfs4_open_confirm_release(void *calldata)
2366 {
2367 struct nfs4_opendata *data = calldata;
2368 struct nfs4_state *state = NULL;
2369
2370 /* If this request hasn't been cancelled, do nothing */
2371 if (!data->cancelled)
2372 goto out_free;
2373 /* In case of error, no cleanup! */
2374 if (!data->rpc_done)
2375 goto out_free;
2376 state = nfs4_opendata_to_nfs4_state(data);
2377 if (!IS_ERR(state))
2378 nfs4_close_state(state, data->o_arg.fmode);
2379 out_free:
2380 nfs4_opendata_put(data);
2381 }
2382
2383 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2384 .rpc_call_prepare = nfs4_open_confirm_prepare,
2385 .rpc_call_done = nfs4_open_confirm_done,
2386 .rpc_release = nfs4_open_confirm_release,
2387 };
2388
2389 /*
2390 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2391 */
2392 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2393 {
2394 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2395 struct rpc_task *task;
2396 struct rpc_message msg = {
2397 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2398 .rpc_argp = &data->c_arg,
2399 .rpc_resp = &data->c_res,
2400 .rpc_cred = data->owner->so_cred,
2401 };
2402 struct rpc_task_setup task_setup_data = {
2403 .rpc_client = server->client,
2404 .rpc_message = &msg,
2405 .callback_ops = &nfs4_open_confirm_ops,
2406 .callback_data = data,
2407 .workqueue = nfsiod_workqueue,
2408 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2409 };
2410 int status;
2411
2412 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2413 data->is_recover);
2414 kref_get(&data->kref);
2415 data->rpc_done = false;
2416 data->rpc_status = 0;
2417 data->timestamp = jiffies;
2418 task = rpc_run_task(&task_setup_data);
2419 if (IS_ERR(task))
2420 return PTR_ERR(task);
2421 status = rpc_wait_for_completion_task(task);
2422 if (status != 0) {
2423 data->cancelled = true;
2424 smp_wmb();
2425 } else
2426 status = data->rpc_status;
2427 rpc_put_task(task);
2428 return status;
2429 }
2430
2431 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2432 {
2433 struct nfs4_opendata *data = calldata;
2434 struct nfs4_state_owner *sp = data->owner;
2435 struct nfs_client *clp = sp->so_server->nfs_client;
2436 enum open_claim_type4 claim = data->o_arg.claim;
2437
2438 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2439 goto out_wait;
2440 /*
2441 * Check if we still need to send an OPEN call, or if we can use
2442 * a delegation instead.
2443 */
2444 if (data->state != NULL) {
2445 struct nfs_delegation *delegation;
2446
2447 if (can_open_cached(data->state, data->o_arg.fmode,
2448 data->o_arg.open_flags, claim))
2449 goto out_no_action;
2450 rcu_read_lock();
2451 delegation = nfs4_get_valid_delegation(data->state->inode);
2452 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2453 goto unlock_no_action;
2454 rcu_read_unlock();
2455 }
2456 /* Update client id. */
2457 data->o_arg.clientid = clp->cl_clientid;
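/*
 * Reclaims and delegation/filehandle-based claims open by file handle,
 * so use the OPEN_NOATTR encoding.
 */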
2458 switch (claim) {
2459 default:
2460 break;
2461 case NFS4_OPEN_CLAIM_PREVIOUS:
2462 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2463 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2464 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2465 fallthrough;
2466 case NFS4_OPEN_CLAIM_FH:
2467 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2468 }
2469 data->timestamp = jiffies;
2470 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2471 &data->o_arg.seq_args,
2472 &data->o_res.seq_res,
2473 task) != 0)
2474 nfs_release_seqid(data->o_arg.seqid);
2475
2476 /* Set the create mode (note dependency on the session type) */
2477 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2478 if (data->o_arg.open_flags & O_EXCL) {
2479 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2480 if (nfs4_has_persistent_session(clp))
2481 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2482 else if (clp->cl_mvops->minor_version > 0)
2483 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2484 }
2485 return;
2486 unlock_no_action:
2487 trace_nfs4_cached_open(data->state);
2488 rcu_read_unlock();
2489 out_no_action:
2490 task->tk_action = NULL;
2491 out_wait:
2492 nfs4_sequence_done(task, &data->o_res.seq_res);
2493 }
2494
2495 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2496 {
2497 struct nfs4_opendata *data = calldata;
2498
2499 data->rpc_status = task->tk_status;
2500
2501 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2502 return;
2503
2504 if (task->tk_status == 0) {
2505 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2506 switch (data->o_res.f_attr->mode & S_IFMT) {
2507 case S_IFREG:
2508 break;
2509 case S_IFLNK:
2510 data->rpc_status = -ELOOP;
2511 break;
2512 case S_IFDIR:
2513 data->rpc_status = -EISDIR;
2514 break;
2515 default:
2516 data->rpc_status = -ENOTDIR;
2517 }
2518 }
2519 renew_lease(data->o_res.server, data->timestamp);
2520 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2521 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2522 }
2523 data->rpc_done = true;
2524 }
2525
2526 static void nfs4_open_release(void *calldata)
2527 {
2528 struct nfs4_opendata *data = calldata;
2529 struct nfs4_state *state = NULL;
2530
2531 /* If this request hasn't been cancelled, do nothing */
2532 if (!data->cancelled)
2533 goto out_free;
2534 /* In case of error, no cleanup! */
2535 if (data->rpc_status != 0 || !data->rpc_done)
2536 goto out_free;
2537 /* In case we need an open_confirm, no cleanup! */
2538 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2539 goto out_free;
2540 state = nfs4_opendata_to_nfs4_state(data);
2541 if (!IS_ERR(state))
2542 nfs4_close_state(state, data->o_arg.fmode);
2543 out_free:
2544 nfs4_opendata_put(data);
2545 }
2546
2547 static const struct rpc_call_ops nfs4_open_ops = {
2548 .rpc_call_prepare = nfs4_open_prepare,
2549 .rpc_call_done = nfs4_open_done,
2550 .rpc_release = nfs4_open_release,
2551 };
2552
2553 static int nfs4_run_open_task(struct nfs4_opendata *data,
2554 struct nfs_open_context *ctx)
2555 {
2556 struct inode *dir = d_inode(data->dir);
2557 struct nfs_server *server = NFS_SERVER(dir);
2558 struct nfs_openargs *o_arg = &data->o_arg;
2559 struct nfs_openres *o_res = &data->o_res;
2560 struct rpc_task *task;
2561 struct rpc_message msg = {
2562 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2563 .rpc_argp = o_arg,
2564 .rpc_resp = o_res,
2565 .rpc_cred = data->owner->so_cred,
2566 };
2567 struct rpc_task_setup task_setup_data = {
2568 .rpc_client = server->client,
2569 .rpc_message = &msg,
2570 .callback_ops = &nfs4_open_ops,
2571 .callback_data = data,
2572 .workqueue = nfsiod_workqueue,
2573 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2574 };
2575 int status;
2576
2577 if (server->nfs_client->cl_minorversion)
2578 task_setup_data.flags |= RPC_TASK_MOVEABLE;
2579
2580 kref_get(&data->kref);
2581 data->rpc_done = false;
2582 data->rpc_status = 0;
2583 data->cancelled = false;
2584 data->is_recover = false;
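/*
 * ctx == NULL means this OPEN is part of state recovery: set up a
 * privileged sequence, let the RPC time out rather than block recovery,
 * and skip the pNFS LAYOUTGET setup.
 */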
2585 if (!ctx) {
2586 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2587 data->is_recover = true;
2588 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2589 } else {
2590 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2591 pnfs_lgopen_prepare(data, ctx);
2592 }
2593 task = rpc_run_task(&task_setup_data);
2594 if (IS_ERR(task))
2595 return PTR_ERR(task);
2596 status = rpc_wait_for_completion_task(task);
2597 if (status != 0) {
2598 data->cancelled = true;
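/*
 * Order the store to ->cancelled before the final release of the task
 * so that nfs4_open_release() sees that the call was abandoned.
 */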
2599 smp_wmb();
2600 } else
2601 status = data->rpc_status;
2602 rpc_put_task(task);
2603
2604 return status;
2605 }
2606
2607 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2608 {
2609 struct inode *dir = d_inode(data->dir);
2610 struct nfs_openres *o_res = &data->o_res;
2611 int status;
2612
2613 status = nfs4_run_open_task(data, NULL);
2614 if (status != 0 || !data->rpc_done)
2615 return status;
2616
2617 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2618
2619 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2620 status = _nfs4_proc_open_confirm(data);
2621
2622 return status;
2623 }
2624
2625 /*
2626 * Additional permission checks in order to distinguish between an
2627 * open for read, and an open for execute. This works around the
2628 * fact that NFSv4 OPEN treats read and execute permissions as being
2629 * the same.
2630 * Note that in the non-execute case, we want to turn off permission
2631 * checking if we just created a new file (POSIX open() semantics).
2632 */
2633 static int nfs4_opendata_access(const struct cred *cred,
2634 struct nfs4_opendata *opendata,
2635 struct nfs4_state *state, fmode_t fmode,
2636 int openflags)
2637 {
2638 struct nfs_access_entry cache;
2639 u32 mask, flags;
2640
2641 /* access call failed or for some reason the server doesn't
2642 * support any access modes -- defer access call until later */
2643 if (opendata->o_res.access_supported == 0)
2644 return 0;
2645
2646 mask = 0;
2647 /*
2648 * Use openflags to check for exec, because fmode won't
2649 * always have FMODE_EXEC set when a file is opened for exec.
2650 */
2651 if (openflags & __FMODE_EXEC) {
2652 /* ONLY check for exec rights */
2653 if (S_ISDIR(state->inode->i_mode))
2654 mask = NFS4_ACCESS_LOOKUP;
2655 else
2656 mask = NFS4_ACCESS_EXECUTE;
2657 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2658 mask = NFS4_ACCESS_READ;
2659
2660 cache.cred = cred;
2661 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2662 nfs_access_add_cache(state->inode, &cache);
2663
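/*
 * Only fail if one of the access rights we actually need is missing
 * from the server's ACCESS reply.
 */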
2664 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2665 if ((mask & ~cache.mask & flags) == 0)
2666 return 0;
2667
2668 return -EACCES;
2669 }
2670
2671 /*
2672 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2673 */
2674 static int _nfs4_proc_open(struct nfs4_opendata *data,
2675 struct nfs_open_context *ctx)
2676 {
2677 struct inode *dir = d_inode(data->dir);
2678 struct nfs_server *server = NFS_SERVER(dir);
2679 struct nfs_openargs *o_arg = &data->o_arg;
2680 struct nfs_openres *o_res = &data->o_res;
2681 int status;
2682
2683 status = nfs4_run_open_task(data, ctx);
2684 if (!data->rpc_done)
2685 return status;
2686 if (status != 0) {
2687 if (status == -NFS4ERR_BADNAME &&
2688 !(o_arg->open_flags & O_CREAT))
2689 return -ENOENT;
2690 return status;
2691 }
2692
2693 nfs_fattr_map_and_free_names(server, &data->f_attr);
2694
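/*
 * For O_CREAT opens, work out whether the file was in fact created
 * (O_EXCL, or the directory change attribute moved) and update the
 * cached directory change information accordingly.
 */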
2695 if (o_arg->open_flags & O_CREAT) {
2696 if (o_arg->open_flags & O_EXCL)
2697 data->file_created = true;
2698 else if (o_res->cinfo.before != o_res->cinfo.after)
2699 data->file_created = true;
2700 if (data->file_created ||
2701 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2702 nfs4_update_changeattr(dir, &o_res->cinfo,
2703 o_res->f_attr->time_start,
2704 NFS_INO_INVALID_DATA);
2705 }
2706 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2707 server->caps &= ~NFS_CAP_POSIX_LOCK;
2708 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2709 status = _nfs4_proc_open_confirm(data);
2710 if (status != 0)
2711 return status;
2712 }
2713 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2714 nfs4_sequence_free_slot(&o_res->seq_res);
2715 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr,
2716 o_res->f_label, NULL);
2717 }
2718 return 0;
2719 }
2720
2721 /*
2722 * OPEN_EXPIRED:
2723 * reclaim state on the server after a network partition.
2724 * Assumes caller holds the appropriate lock
2725 */
2726 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2727 {
2728 struct nfs4_opendata *opendata;
2729 int ret;
2730
2731 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2732 NFS4_OPEN_CLAIM_FH);
2733 if (IS_ERR(opendata))
2734 return PTR_ERR(opendata);
2735 ret = nfs4_open_recover(opendata, state);
2736 if (ret == -ESTALE)
2737 d_drop(ctx->dentry);
2738 nfs4_opendata_put(opendata);
2739 return ret;
2740 }
2741
2742 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2743 {
2744 struct nfs_server *server = NFS_SERVER(state->inode);
2745 struct nfs4_exception exception = { };
2746 int err;
2747
2748 do {
2749 err = _nfs4_open_expired(ctx, state);
2750 trace_nfs4_open_expired(ctx, 0, err);
2751 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2752 continue;
2753 switch (err) {
2754 default:
2755 goto out;
2756 case -NFS4ERR_GRACE:
2757 case -NFS4ERR_DELAY:
2758 nfs4_handle_exception(server, err, &exception);
2759 err = 0;
2760 }
2761 } while (exception.retry);
2762 out:
2763 return err;
2764 }
2765
2766 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2767 {
2768 struct nfs_open_context *ctx;
2769 int ret;
2770
2771 ctx = nfs4_state_find_open_context(state);
2772 if (IS_ERR(ctx))
2773 return -EAGAIN;
2774 ret = nfs4_do_open_expired(ctx, state);
2775 put_nfs_open_context(ctx);
2776 return ret;
2777 }
2778
2779 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2780 const nfs4_stateid *stateid)
2781 {
2782 nfs_remove_bad_delegation(state->inode, stateid);
2783 nfs_state_clear_delegation(state);
2784 }
2785
2786 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2787 {
2788 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2789 nfs_finish_clear_delegation_stateid(state, NULL);
2790 }
2791
2792 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2793 {
2794 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2795 nfs40_clear_delegation_stateid(state);
2796 nfs_state_clear_open_state_flags(state);
2797 return nfs4_open_expired(sp, state);
2798 }
2799
2800 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2801 nfs4_stateid *stateid,
2802 const struct cred *cred)
2803 {
2804 return -NFS4ERR_BAD_STATEID;
2805 }
2806
2807 #if defined(CONFIG_NFS_V4_1)
2808 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2809 nfs4_stateid *stateid,
2810 const struct cred *cred)
2811 {
2812 int status;
2813
2814 switch (stateid->type) {
2815 default:
2816 break;
2817 case NFS4_INVALID_STATEID_TYPE:
2818 case NFS4_SPECIAL_STATEID_TYPE:
2819 return -NFS4ERR_BAD_STATEID;
2820 case NFS4_REVOKED_STATEID_TYPE:
2821 goto out_free;
2822 }
2823
2824 status = nfs41_test_stateid(server, stateid, cred);
2825 switch (status) {
2826 case -NFS4ERR_EXPIRED:
2827 case -NFS4ERR_ADMIN_REVOKED:
2828 case -NFS4ERR_DELEG_REVOKED:
2829 break;
2830 default:
2831 return status;
2832 }
2833 out_free:
2834 /* Ack the revoked state to the server */
2835 nfs41_free_stateid(server, stateid, cred, true);
2836 return -NFS4ERR_EXPIRED;
2837 }
2838
2839 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2840 {
2841 struct nfs_server *server = NFS_SERVER(state->inode);
2842 nfs4_stateid stateid;
2843 struct nfs_delegation *delegation;
2844 const struct cred *cred = NULL;
2845 int status, ret = NFS_OK;
2846
2847 /* Get the delegation credential for use by test/free_stateid */
2848 rcu_read_lock();
2849 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2850 if (delegation == NULL) {
2851 rcu_read_unlock();
2852 nfs_state_clear_delegation(state);
2853 return NFS_OK;
2854 }
2855
2856 spin_lock(&delegation->lock);
2857 nfs4_stateid_copy(&stateid, &delegation->stateid);
2858
2859 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2860 &delegation->flags)) {
2861 spin_unlock(&delegation->lock);
2862 rcu_read_unlock();
2863 return NFS_OK;
2864 }
2865
2866 if (delegation->cred)
2867 cred = get_cred(delegation->cred);
2868 spin_unlock(&delegation->lock);
2869 rcu_read_unlock();
2870 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2871 trace_nfs4_test_delegation_stateid(state, NULL, status);
2872 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2873 nfs_finish_clear_delegation_stateid(state, &stateid);
2874 else
2875 ret = status;
2876
2877 put_cred(cred);
2878 return ret;
2879 }
2880
2881 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2882 {
2883 nfs4_stateid tmp;
2884
2885 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2886 nfs4_copy_delegation_stateid(state->inode, state->state,
2887 &tmp, NULL) &&
2888 nfs4_stateid_match_other(&state->stateid, &tmp))
2889 nfs_state_set_delegation(state, &tmp, state->state);
2890 else
2891 nfs_state_clear_delegation(state);
2892 }
2893
2894 /**
2895 * nfs41_check_expired_locks - possibly free a lock stateid
2896 *
2897 * @state: NFSv4 state for an inode
2898 *
2899 * Returns NFS_OK if recovery for this stateid is now finished.
2900 * Otherwise a negative NFS4ERR value is returned.
2901 */
2902 static int nfs41_check_expired_locks(struct nfs4_state *state)
2903 {
2904 int status, ret = NFS_OK;
2905 struct nfs4_lock_state *lsp, *prev = NULL;
2906 struct nfs_server *server = NFS_SERVER(state->inode);
2907
2908 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2909 goto out;
2910
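/*
 * Walk the lock states, dropping state_lock around the synchronous
 * TEST_STATEID call; take a reference on the current entry and defer
 * releasing the previous one so the list walk remains safe.
 */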
2911 spin_lock(&state->state_lock);
2912 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2913 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2914 const struct cred *cred = lsp->ls_state->owner->so_cred;
2915
2916 refcount_inc(&lsp->ls_count);
2917 spin_unlock(&state->state_lock);
2918
2919 nfs4_put_lock_state(prev);
2920 prev = lsp;
2921
2922 status = nfs41_test_and_free_expired_stateid(server,
2923 &lsp->ls_stateid,
2924 cred);
2925 trace_nfs4_test_lock_stateid(state, lsp, status);
2926 if (status == -NFS4ERR_EXPIRED ||
2927 status == -NFS4ERR_BAD_STATEID) {
2928 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2929 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2930 if (!recover_lost_locks)
2931 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2932 } else if (status != NFS_OK) {
2933 ret = status;
2934 nfs4_put_lock_state(prev);
2935 goto out;
2936 }
2937 spin_lock(&state->state_lock);
2938 }
2939 }
2940 spin_unlock(&state->state_lock);
2941 nfs4_put_lock_state(prev);
2942 out:
2943 return ret;
2944 }
2945
2946 /**
2947 * nfs41_check_open_stateid - possibly free an open stateid
2948 *
2949 * @state: NFSv4 state for an inode
2950 *
2951 * Returns NFS_OK if recovery for this stateid is now finished.
2952 * Otherwise a negative NFS4ERR value is returned.
2953 */
2954 static int nfs41_check_open_stateid(struct nfs4_state *state)
2955 {
2956 struct nfs_server *server = NFS_SERVER(state->inode);
2957 nfs4_stateid *stateid = &state->open_stateid;
2958 const struct cred *cred = state->owner->so_cred;
2959 int status;
2960
2961 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2962 return -NFS4ERR_BAD_STATEID;
2963 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2964 trace_nfs4_test_open_stateid(state, NULL, status);
2965 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2966 nfs_state_clear_open_state_flags(state);
2967 stateid->type = NFS4_INVALID_STATEID_TYPE;
2968 return status;
2969 }
2970 if (nfs_open_stateid_recover_openmode(state))
2971 return -NFS4ERR_OPENMODE;
2972 return NFS_OK;
2973 }
2974
2975 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2976 {
2977 int status;
2978
2979 status = nfs41_check_delegation_stateid(state);
2980 if (status != NFS_OK)
2981 return status;
2982 nfs41_delegation_recover_stateid(state);
2983
2984 status = nfs41_check_expired_locks(state);
2985 if (status != NFS_OK)
2986 return status;
2987 status = nfs41_check_open_stateid(state);
2988 if (status != NFS_OK)
2989 status = nfs4_open_expired(sp, state);
2990 return status;
2991 }
2992 #endif
2993
2994 /*
2995 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2996 * fields corresponding to attributes that were used to store the verifier.
2997 * Make sure we clobber those fields in the later setattr call.
2998 */
2999 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
3000 struct iattr *sattr, struct nfs4_label **label)
3001 {
3002 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
3003 __u32 attrset[3];
3004 unsigned ret;
3005 unsigned i;
3006
3007 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
3008 attrset[i] = opendata->o_res.attrset[i];
3009 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
3010 attrset[i] &= ~bitmask[i];
3011 }
3012
3013 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3014 sattr->ia_valid : 0;
3015
3016 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
3017 if (sattr->ia_valid & ATTR_ATIME_SET)
3018 ret |= ATTR_ATIME_SET;
3019 else
3020 ret |= ATTR_ATIME;
3021 }
3022
3023 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
3024 if (sattr->ia_valid & ATTR_MTIME_SET)
3025 ret |= ATTR_MTIME_SET;
3026 else
3027 ret |= ATTR_MTIME;
3028 }
3029
3030 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3031 *label = NULL;
3032 return ret;
3033 }
3034
3035 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3036 int flags, struct nfs_open_context *ctx)
3037 {
3038 struct nfs4_state_owner *sp = opendata->owner;
3039 struct nfs_server *server = sp->so_server;
3040 struct dentry *dentry;
3041 struct nfs4_state *state;
3042 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3043 struct inode *dir = d_inode(opendata->dir);
3044 unsigned long dir_verifier;
3045 unsigned int seq;
3046 int ret;
3047
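/*
 * Sample the state owner's reclaim seqcount so we can detect, once the
 * OPEN completes, whether state recovery ran concurrently.
 */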
3048 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3049 dir_verifier = nfs_save_change_attribute(dir);
3050
3051 ret = _nfs4_proc_open(opendata, ctx);
3052 if (ret != 0)
3053 goto out;
3054
3055 state = _nfs4_opendata_to_nfs4_state(opendata);
3056 ret = PTR_ERR(state);
3057 if (IS_ERR(state))
3058 goto out;
3059 ctx->state = state;
3060 if (server->caps & NFS_CAP_POSIX_LOCK)
3061 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3062 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3063 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3064
3065 dentry = opendata->dentry;
3066 if (d_really_is_negative(dentry)) {
3067 struct dentry *alias;
3068 d_drop(dentry);
3069 alias = d_exact_alias(dentry, state->inode);
3070 if (!alias)
3071 alias = d_splice_alias(igrab(state->inode), dentry);
3072 /* d_splice_alias() can't fail here - it's a non-directory */
3073 if (alias) {
3074 dput(ctx->dentry);
3075 ctx->dentry = dentry = alias;
3076 }
3077 }
3078
3079 switch(opendata->o_arg.claim) {
3080 default:
3081 break;
3082 case NFS4_OPEN_CLAIM_NULL:
3083 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3084 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3085 if (!opendata->rpc_done)
3086 break;
3087 if (opendata->o_res.delegation_type != 0)
3088 dir_verifier = nfs_save_change_attribute(dir);
3089 nfs_set_verifier(dentry, dir_verifier);
3090 }
3091
3092 /* Parse layoutget results before we check for access */
3093 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3094
3095 ret = nfs4_opendata_access(sp->so_cred, opendata, state,
3096 acc_mode, flags);
3097 if (ret != 0)
3098 goto out;
3099
3100 if (d_inode(dentry) == state->inode) {
3101 nfs_inode_attach_open_context(ctx);
3102 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3103 nfs4_schedule_stateid_recovery(server, state);
3104 }
3105
3106 out:
3107 if (!opendata->cancelled)
3108 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3109 return ret;
3110 }
3111
3112 /*
3113 * Returns a referenced nfs4_state
3114 */
3115 static int _nfs4_do_open(struct inode *dir,
3116 struct nfs_open_context *ctx,
3117 int flags,
3118 const struct nfs4_open_createattrs *c,
3119 int *opened)
3120 {
3121 struct nfs4_state_owner *sp;
3122 struct nfs4_state *state = NULL;
3123 struct nfs_server *server = NFS_SERVER(dir);
3124 struct nfs4_opendata *opendata;
3125 struct dentry *dentry = ctx->dentry;
3126 const struct cred *cred = ctx->cred;
3127 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3128 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3129 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3130 struct iattr *sattr = c->sattr;
3131 struct nfs4_label *label = c->label;
3132 struct nfs4_label *olabel = NULL;
3133 int status;
3134
3135 /* Protect against reboot recovery conflicts */
3136 status = -ENOMEM;
3137 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3138 if (sp == NULL) {
3139 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3140 goto out_err;
3141 }
3142 status = nfs4_client_recover_expired_lease(server->nfs_client);
3143 if (status != 0)
3144 goto err_put_state_owner;
3145 if (d_really_is_positive(dentry))
3146 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3147 status = -ENOMEM;
3148 if (d_really_is_positive(dentry))
3149 claim = NFS4_OPEN_CLAIM_FH;
3150 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3151 c, claim, GFP_KERNEL);
3152 if (opendata == NULL)
3153 goto err_put_state_owner;
3154
3155 if (label) {
3156 olabel = nfs4_label_alloc(server, GFP_KERNEL);
3157 if (IS_ERR(olabel)) {
3158 status = PTR_ERR(olabel);
3159 goto err_opendata_put;
3160 }
3161 }
3162
3163 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3164 if (!opendata->f_attr.mdsthreshold) {
3165 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3166 if (!opendata->f_attr.mdsthreshold)
3167 goto err_free_label;
3168 }
3169 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3170 }
3171 if (d_really_is_positive(dentry))
3172 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3173
3174 status = _nfs4_open_and_get_state(opendata, flags, ctx);
3175 if (status != 0)
3176 goto err_free_label;
3177 state = ctx->state;
3178
3179 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3180 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3181 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3182 /*
3183 * Send the create attributes that were not set by the OPEN
3184 * with an extra SETATTR.
3185 */
3186 if (attrs || label) {
3187 unsigned ia_old = sattr->ia_valid;
3188
3189 sattr->ia_valid = attrs;
3190 nfs_fattr_init(opendata->o_res.f_attr);
3191 status = nfs4_do_setattr(state->inode, cred,
3192 opendata->o_res.f_attr, sattr,
3193 ctx, label, olabel);
3194 if (status == 0) {
3195 nfs_setattr_update_inode(state->inode, sattr,
3196 opendata->o_res.f_attr);
3197 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
3198 }
3199 sattr->ia_valid = ia_old;
3200 }
3201 }
3202 if (opened && opendata->file_created)
3203 *opened = 1;
3204
3205 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3206 *ctx_th = opendata->f_attr.mdsthreshold;
3207 opendata->f_attr.mdsthreshold = NULL;
3208 }
3209
3210 nfs4_label_free(olabel);
3211
3212 nfs4_opendata_put(opendata);
3213 nfs4_put_state_owner(sp);
3214 return 0;
3215 err_free_label:
3216 nfs4_label_free(olabel);
3217 err_opendata_put:
3218 nfs4_opendata_put(opendata);
3219 err_put_state_owner:
3220 nfs4_put_state_owner(sp);
3221 out_err:
3222 return status;
3223 }
3224
3225
3226 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3227 struct nfs_open_context *ctx,
3228 int flags,
3229 struct iattr *sattr,
3230 struct nfs4_label *label,
3231 int *opened)
3232 {
3233 struct nfs_server *server = NFS_SERVER(dir);
3234 struct nfs4_exception exception = {
3235 .interruptible = true,
3236 };
3237 struct nfs4_state *res;
3238 struct nfs4_open_createattrs c = {
3239 .label = label,
3240 .sattr = sattr,
3241 .verf = {
3242 [0] = (__u32)jiffies,
3243 [1] = (__u32)current->pid,
3244 },
3245 };
3246 int status;
3247
3248 do {
3249 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3250 res = ctx->state;
3251 trace_nfs4_open_file(ctx, flags, status);
3252 if (status == 0)
3253 break;
3254 /* NOTE: BAD_SEQID means the server and client disagree about the
3255 * book-keeping w.r.t. state-changing operations
3256 * (OPEN/CLOSE/LOCK/LOCKU...)
3257 * It is actually a sign of a bug on the client or on the server.
3258 *
3259 * If we receive a BAD_SEQID error in the particular case of
3260 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3261 * have unhashed the old state_owner for us, and that we can
3262 * therefore safely retry using a new one. We should still warn
3263 * the user though...
3264 */
3265 if (status == -NFS4ERR_BAD_SEQID) {
3266 pr_warn_ratelimited("NFS: v4 server %s "
3267 " returned a bad sequence-id error!\n",
3268 NFS_SERVER(dir)->nfs_client->cl_hostname);
3269 exception.retry = 1;
3270 continue;
3271 }
3272 /*
3273 * BAD_STATEID on OPEN means that the server cancelled our
3274 * state before it received the OPEN_CONFIRM.
3275 * Recover by retrying the request as per the discussion
3276 * on Page 181 of RFC3530.
3277 */
3278 if (status == -NFS4ERR_BAD_STATEID) {
3279 exception.retry = 1;
3280 continue;
3281 }
3282 if (status == -NFS4ERR_EXPIRED) {
3283 nfs4_schedule_lease_recovery(server->nfs_client);
3284 exception.retry = 1;
3285 continue;
3286 }
3287 if (status == -EAGAIN) {
3288 /* We must have found a delegation */
3289 exception.retry = 1;
3290 continue;
3291 }
3292 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3293 continue;
3294 res = ERR_PTR(nfs4_handle_exception(server,
3295 status, &exception));
3296 } while (exception.retry);
3297 return res;
3298 }
3299
3300 static int _nfs4_do_setattr(struct inode *inode,
3301 struct nfs_setattrargs *arg,
3302 struct nfs_setattrres *res,
3303 const struct cred *cred,
3304 struct nfs_open_context *ctx)
3305 {
3306 struct nfs_server *server = NFS_SERVER(inode);
3307 struct rpc_message msg = {
3308 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3309 .rpc_argp = arg,
3310 .rpc_resp = res,
3311 .rpc_cred = cred,
3312 };
3313 const struct cred *delegation_cred = NULL;
3314 unsigned long timestamp = jiffies;
3315 bool truncate;
3316 int status;
3317
3318 nfs_fattr_init(res->fattr);
3319
3320 /* Servers should only apply open mode checks for file size changes */
3321 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3322 if (!truncate) {
3323 nfs4_inode_make_writeable(inode);
3324 goto zero_stateid;
3325 }
3326
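/*
 * Choose the stateid for the SETATTR: prefer a write delegation, then
 * an open/lock stateid from the caller's context, and otherwise fall
 * back to the all-zeros stateid.
 */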
3327 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3328 /* Use that stateid */
3329 } else if (ctx != NULL && ctx->state) {
3330 struct nfs_lock_context *l_ctx;
3331 if (!nfs4_valid_open_stateid(ctx->state))
3332 return -EBADF;
3333 l_ctx = nfs_get_lock_context(ctx);
3334 if (IS_ERR(l_ctx))
3335 return PTR_ERR(l_ctx);
3336 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3337 &arg->stateid, &delegation_cred);
3338 nfs_put_lock_context(l_ctx);
3339 if (status == -EIO)
3340 return -EBADF;
3341 else if (status == -EAGAIN)
3342 goto zero_stateid;
3343 } else {
3344 zero_stateid:
3345 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3346 }
3347 if (delegation_cred)
3348 msg.rpc_cred = delegation_cred;
3349
3350 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3351
3352 put_cred(delegation_cred);
3353 if (status == 0 && ctx != NULL)
3354 renew_lease(server, timestamp);
3355 trace_nfs4_setattr(inode, &arg->stateid, status);
3356 return status;
3357 }
3358
3359 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3360 struct nfs_fattr *fattr, struct iattr *sattr,
3361 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3362 struct nfs4_label *olabel)
3363 {
3364 struct nfs_server *server = NFS_SERVER(inode);
3365 __u32 bitmask[NFS4_BITMASK_SZ];
3366 struct nfs4_state *state = ctx ? ctx->state : NULL;
3367 struct nfs_setattrargs arg = {
3368 .fh = NFS_FH(inode),
3369 .iap = sattr,
3370 .server = server,
3371 .bitmask = bitmask,
3372 .label = ilabel,
3373 };
3374 struct nfs_setattrres res = {
3375 .fattr = fattr,
3376 .label = olabel,
3377 .server = server,
3378 };
3379 struct nfs4_exception exception = {
3380 .state = state,
3381 .inode = inode,
3382 .stateid = &arg.stateid,
3383 };
3384 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
3385 int err;
3386
3387 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
3388 adjust_flags |= NFS_INO_INVALID_MODE;
3389 if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
3390 adjust_flags |= NFS_INO_INVALID_OTHER;
3391
3392 do {
3393 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, olabel),
3394 inode, adjust_flags);
3395
3396 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3397 switch (err) {
3398 case -NFS4ERR_OPENMODE:
3399 if (!(sattr->ia_valid & ATTR_SIZE)) {
3400 pr_warn_once("NFSv4: server %s is incorrectly "
3401 "applying open mode checks to "
3402 "a SETATTR that is not "
3403 "changing file size.\n",
3404 server->nfs_client->cl_hostname);
3405 }
3406 if (state && !(state->state & FMODE_WRITE)) {
3407 err = -EBADF;
3408 if (sattr->ia_valid & ATTR_OPEN)
3409 err = -EACCES;
3410 goto out;
3411 }
3412 }
3413 err = nfs4_handle_exception(server, err, &exception);
3414 } while (exception.retry);
3415 out:
3416 return err;
3417 }
3418
3419 static bool
3420 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3421 {
3422 if (inode == NULL || !nfs_have_layout(inode))
3423 return false;
3424
3425 return pnfs_wait_on_layoutreturn(inode, task);
3426 }
3427
3428 /*
3429 * Update the seqid of an open stateid
3430 */
3431 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3432 struct nfs4_state *state)
3433 {
3434 __be32 seqid_open;
3435 u32 dst_seqid;
3436 int seq;
3437
3438 for (;;) {
3439 if (!nfs4_valid_open_stateid(state))
3440 break;
3441 seq = read_seqbegin(&state->seqlock);
3442 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3443 nfs4_stateid_copy(dst, &state->open_stateid);
3444 if (read_seqretry(&state->seqlock, seq))
3445 continue;
3446 break;
3447 }
3448 seqid_open = state->open_stateid.seqid;
3449 if (read_seqretry(&state->seqlock, seq))
3450 continue;
3451
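/* Only move dst->seqid forwards; the signed difference copes with seqid wraparound. */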
3452 dst_seqid = be32_to_cpu(dst->seqid);
3453 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3454 dst->seqid = seqid_open;
3455 break;
3456 }
3457 }
3458
3459 /*
3460 * Update the seqid of an open stateid after receiving
3461 * NFS4ERR_OLD_STATEID
3462 */
3463 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3464 struct nfs4_state *state)
3465 {
3466 __be32 seqid_open;
3467 u32 dst_seqid;
3468 bool ret;
3469 int seq, status = -EAGAIN;
3470 DEFINE_WAIT(wait);
3471
3472 for (;;) {
3473 ret = false;
3474 if (!nfs4_valid_open_stateid(state))
3475 break;
3476 seq = read_seqbegin(&state->seqlock);
3477 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3478 if (read_seqretry(&state->seqlock, seq))
3479 continue;
3480 break;
3481 }
3482
3483 write_seqlock(&state->seqlock);
3484 seqid_open = state->open_stateid.seqid;
3485
3486 dst_seqid = be32_to_cpu(dst->seqid);
3487
3488 /* Did another OPEN bump the state's seqid? try again: */
3489 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3490 dst->seqid = seqid_open;
3491 write_sequnlock(&state->seqlock);
3492 ret = true;
3493 break;
3494 }
3495
3496 /* server says we're behind but we haven't seen the update yet */
3497 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3498 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3499 write_sequnlock(&state->seqlock);
3500 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3501
3502 if (fatal_signal_pending(current))
3503 status = -EINTR;
3504 else
3505 if (schedule_timeout(5*HZ) != 0)
3506 status = 0;
3507
3508 finish_wait(&state->waitq, &wait);
3509
3510 if (!status)
3511 continue;
3512 if (status == -EINTR)
3513 break;
3514
3515 /* we slept the whole 5 seconds, we must have lost a seqid */
3516 dst->seqid = cpu_to_be32(dst_seqid + 1);
3517 ret = true;
3518 break;
3519 }
3520
3521 return ret;
3522 }
3523
3524 struct nfs4_closedata {
3525 struct inode *inode;
3526 struct nfs4_state *state;
3527 struct nfs_closeargs arg;
3528 struct nfs_closeres res;
3529 struct {
3530 struct nfs4_layoutreturn_args arg;
3531 struct nfs4_layoutreturn_res res;
3532 struct nfs4_xdr_opaque_data ld_private;
3533 u32 roc_barrier;
3534 bool roc;
3535 } lr;
3536 struct nfs_fattr fattr;
3537 unsigned long timestamp;
3538 };
3539
3540 static void nfs4_free_closedata(void *data)
3541 {
3542 struct nfs4_closedata *calldata = data;
3543 struct nfs4_state_owner *sp = calldata->state->owner;
3544 struct super_block *sb = calldata->state->inode->i_sb;
3545
3546 if (calldata->lr.roc)
3547 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3548 calldata->res.lr_ret);
3549 nfs4_put_open_state(calldata->state);
3550 nfs_free_seqid(calldata->arg.seqid);
3551 nfs4_put_state_owner(sp);
3552 nfs_sb_deactive(sb);
3553 kfree(calldata);
3554 }
3555
3556 static void nfs4_close_done(struct rpc_task *task, void *data)
3557 {
3558 struct nfs4_closedata *calldata = data;
3559 struct nfs4_state *state = calldata->state;
3560 struct nfs_server *server = NFS_SERVER(calldata->inode);
3561 nfs4_stateid *res_stateid = NULL;
3562 struct nfs4_exception exception = {
3563 .state = state,
3564 .inode = calldata->inode,
3565 .stateid = &calldata->arg.stateid,
3566 };
3567
3568 dprintk("%s: begin!\n", __func__);
3569 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3570 return;
3571 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3572
3573 /* Handle Layoutreturn errors */
3574 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3575 &calldata->res.lr_ret) == -EAGAIN)
3576 goto out_restart;
3577
3578 /* Hmm. We are done with the inode, and are in the process of freeing
3579 * the state_owner. Keep this around to process errors.
3580 */
3581 switch (task->tk_status) {
3582 case 0:
3583 res_stateid = &calldata->res.stateid;
3584 renew_lease(server, calldata->timestamp);
3585 break;
3586 case -NFS4ERR_ACCESS:
3587 if (calldata->arg.bitmask != NULL) {
3588 calldata->arg.bitmask = NULL;
3589 calldata->res.fattr = NULL;
3590 goto out_restart;
3591
3592 }
3593 break;
3594 case -NFS4ERR_OLD_STATEID:
3595 /* Did we race with OPEN? */
3596 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3597 state))
3598 goto out_restart;
3599 goto out_release;
3600 case -NFS4ERR_ADMIN_REVOKED:
3601 case -NFS4ERR_STALE_STATEID:
3602 case -NFS4ERR_EXPIRED:
3603 nfs4_free_revoked_stateid(server,
3604 &calldata->arg.stateid,
3605 task->tk_msg.rpc_cred);
3606 fallthrough;
3607 case -NFS4ERR_BAD_STATEID:
3608 if (calldata->arg.fmode == 0)
3609 break;
3610 fallthrough;
3611 default:
3612 task->tk_status = nfs4_async_handle_exception(task,
3613 server, task->tk_status, &exception);
3614 if (exception.retry)
3615 goto out_restart;
3616 }
3617 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3618 res_stateid, calldata->arg.fmode);
3619 out_release:
3620 task->tk_status = 0;
3621 nfs_release_seqid(calldata->arg.seqid);
3622 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3623 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3624 return;
3625 out_restart:
3626 task->tk_status = 0;
3627 rpc_restart_call_prepare(task);
3628 goto out_release;
3629 }
3630
3631 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3632 {
3633 struct nfs4_closedata *calldata = data;
3634 struct nfs4_state *state = calldata->state;
3635 struct inode *inode = calldata->inode;
3636 struct nfs_server *server = NFS_SERVER(inode);
3637 struct pnfs_layout_hdr *lo;
3638 bool is_rdonly, is_wronly, is_rdwr;
3639 int call_close = 0;
3640
3641 dprintk("%s: begin!\n", __func__);
3642 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3643 goto out_wait;
3644
3645 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3646 spin_lock(&state->owner->so_lock);
3647 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3648 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3649 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3650 /* Calculate the change in open mode */
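/*
 * arg.fmode ends up holding the share modes that must stay open;
 * call_close is set if the state holds modes we no longer need, in
 * which case a CLOSE or OPEN_DOWNGRADE is sent.
 */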
3651 calldata->arg.fmode = 0;
3652 if (state->n_rdwr == 0) {
3653 if (state->n_rdonly == 0)
3654 call_close |= is_rdonly;
3655 else if (is_rdonly)
3656 calldata->arg.fmode |= FMODE_READ;
3657 if (state->n_wronly == 0)
3658 call_close |= is_wronly;
3659 else if (is_wronly)
3660 calldata->arg.fmode |= FMODE_WRITE;
3661 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3662 call_close |= is_rdwr;
3663 } else if (is_rdwr)
3664 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3665
3666 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3667 if (!nfs4_valid_open_stateid(state))
3668 call_close = 0;
3669 spin_unlock(&state->owner->so_lock);
3670
3671 if (!call_close) {
3672 /* Note: exit _without_ calling nfs4_close_done */
3673 goto out_no_action;
3674 }
3675
3676 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3677 nfs_release_seqid(calldata->arg.seqid);
3678 goto out_wait;
3679 }
3680
3681 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3682 if (lo && !pnfs_layout_is_valid(lo)) {
3683 calldata->arg.lr_args = NULL;
3684 calldata->res.lr_res = NULL;
3685 }
3686
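/* No share modes left to keep open: send a full CLOSE instead of an OPEN_DOWNGRADE. */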
3687 if (calldata->arg.fmode == 0)
3688 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3689
3690 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3691 /* Close-to-open cache consistency revalidation */
3692 if (!nfs4_have_delegation(inode, FMODE_READ)) {
3693 nfs4_bitmask_set(calldata->arg.bitmask_store,
3694 server->cache_consistency_bitmask,
3695 inode, server, NULL);
3696 calldata->arg.bitmask = calldata->arg.bitmask_store;
3697 } else
3698 calldata->arg.bitmask = NULL;
3699 }
3700
3701 calldata->arg.share_access =
3702 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3703 calldata->arg.fmode, 0);
3704
3705 if (calldata->res.fattr == NULL)
3706 calldata->arg.bitmask = NULL;
3707 else if (calldata->arg.bitmask == NULL)
3708 calldata->res.fattr = NULL;
3709 calldata->timestamp = jiffies;
3710 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3711 &calldata->arg.seq_args,
3712 &calldata->res.seq_res,
3713 task) != 0)
3714 nfs_release_seqid(calldata->arg.seqid);
3715 dprintk("%s: done!\n", __func__);
3716 return;
3717 out_no_action:
3718 task->tk_action = NULL;
3719 out_wait:
3720 nfs4_sequence_done(task, &calldata->res.seq_res);
3721 }
3722
3723 static const struct rpc_call_ops nfs4_close_ops = {
3724 .rpc_call_prepare = nfs4_close_prepare,
3725 .rpc_call_done = nfs4_close_done,
3726 .rpc_release = nfs4_free_closedata,
3727 };
3728
3729 /*
3730 * It is possible for data to be read/written from a mem-mapped file
3731 * after the sys_close call (which hits the vfs layer as a flush).
3732 * This means that we can't safely call NFSv4 close on a file until
3733 * the inode is cleared. This in turn means that we are not good
3734 * NFSv4 citizens - we do not tell the server to update the file's
3735 * share state even when we are done with one of the three share
3736 * stateids in the inode.
3737 *
3738 * NOTE: Caller must be holding the sp->so_owner semaphore!
3739 */
3740 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3741 {
3742 struct nfs_server *server = NFS_SERVER(state->inode);
3743 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3744 struct nfs4_closedata *calldata;
3745 struct nfs4_state_owner *sp = state->owner;
3746 struct rpc_task *task;
3747 struct rpc_message msg = {
3748 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3749 .rpc_cred = state->owner->so_cred,
3750 };
3751 struct rpc_task_setup task_setup_data = {
3752 .rpc_client = server->client,
3753 .rpc_message = &msg,
3754 .callback_ops = &nfs4_close_ops,
3755 .workqueue = nfsiod_workqueue,
3756 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3757 };
3758 int status = -ENOMEM;
3759
3760 if (server->nfs_client->cl_minorversion)
3761 task_setup_data.flags |= RPC_TASK_MOVEABLE;
3762
3763 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3764 &task_setup_data.rpc_client, &msg);
3765
3766 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3767 if (calldata == NULL)
3768 goto out;
3769 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3770 calldata->inode = state->inode;
3771 calldata->state = state;
3772 calldata->arg.fh = NFS_FH(state->inode);
3773 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3774 goto out_free_calldata;
3775 /* Serialization for the sequence id */
3776 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3777 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3778 if (IS_ERR(calldata->arg.seqid))
3779 goto out_free_calldata;
3780 nfs_fattr_init(&calldata->fattr);
3781 calldata->arg.fmode = 0;
3782 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3783 calldata->res.fattr = &calldata->fattr;
3784 calldata->res.seqid = calldata->arg.seqid;
3785 calldata->res.server = server;
3786 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3787 calldata->lr.roc = pnfs_roc(state->inode,
3788 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3789 if (calldata->lr.roc) {
3790 calldata->arg.lr_args = &calldata->lr.arg;
3791 calldata->res.lr_res = &calldata->lr.res;
3792 }
3793 nfs_sb_active(calldata->inode->i_sb);
3794
3795 msg.rpc_argp = &calldata->arg;
3796 msg.rpc_resp = &calldata->res;
3797 task_setup_data.callback_data = calldata;
3798 task = rpc_run_task(&task_setup_data);
3799 if (IS_ERR(task))
3800 return PTR_ERR(task);
3801 status = 0;
3802 if (wait)
3803 status = rpc_wait_for_completion_task(task);
3804 rpc_put_task(task);
3805 return status;
3806 out_free_calldata:
3807 kfree(calldata);
3808 out:
3809 nfs4_put_open_state(state);
3810 nfs4_put_state_owner(sp);
3811 return status;
3812 }
3813
3814 static struct inode *
3815 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3816 int open_flags, struct iattr *attr, int *opened)
3817 {
3818 struct nfs4_state *state;
3819 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3820
3821 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3822
3823 /* Protect against concurrent sillydeletes */
3824 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3825
3826 nfs4_label_release_security(label);
3827
3828 if (IS_ERR(state))
3829 return ERR_CAST(state);
3830 return state->inode;
3831 }
3832
3833 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3834 {
3835 if (ctx->state == NULL)
3836 return;
3837 if (is_sync)
3838 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3839 else
3840 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3841 }
3842
3843 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3844 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3845 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
3846
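/*
 * Probe the server for the attributes and operations it supports and
 * cache the result in the nfs_server: supported attribute bitmasks,
 * ACL/hardlink/symlink/security-label capabilities, the cache
 * consistency and exclusive-create bitmasks, and the fh expiry type.
 */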
3847 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3848 {
3849 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3850 struct nfs4_server_caps_arg args = {
3851 .fhandle = fhandle,
3852 .bitmask = bitmask,
3853 };
3854 struct nfs4_server_caps_res res = {};
3855 struct rpc_message msg = {
3856 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3857 .rpc_argp = &args,
3858 .rpc_resp = &res,
3859 };
3860 int status;
3861 int i;
3862
3863 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3864 FATTR4_WORD0_FH_EXPIRE_TYPE |
3865 FATTR4_WORD0_LINK_SUPPORT |
3866 FATTR4_WORD0_SYMLINK_SUPPORT |
3867 FATTR4_WORD0_ACLSUPPORT;
3868 if (minorversion)
3869 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3870
3871 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3872 if (status == 0) {
3873 /* Sanity check the server's answers */
3874 switch (minorversion) {
3875 case 0:
3876 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3877 res.attr_bitmask[2] = 0;
3878 break;
3879 case 1:
3880 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3881 break;
3882 case 2:
3883 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3884 }
3885 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3886 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
3887 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
3888 server->fattr_valid = NFS_ATTR_FATTR_V4;
3889 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3890 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3891 server->caps |= NFS_CAP_ACLS;
3892 if (res.has_links != 0)
3893 server->caps |= NFS_CAP_HARDLINKS;
3894 if (res.has_symlinks != 0)
3895 server->caps |= NFS_CAP_SYMLINKS;
3896 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3897 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3898 server->caps |= NFS_CAP_SECURITY_LABEL;
3899 #endif
3900 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
3901 server->caps |= NFS_CAP_FS_LOCATIONS;
3902 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
3903 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
3904 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
3905 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
3906 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
3907 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
3908 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
3909 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
3910 NFS_ATTR_FATTR_OWNER_NAME);
3911 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
3912 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
3913 NFS_ATTR_FATTR_GROUP_NAME);
3914 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
3915 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
3916 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
3917 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
3918 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
3919 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
3920 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
3921 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
3922 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3923 sizeof(server->attr_bitmask));
3924 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3925
3926 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3927 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3928 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3929 server->cache_consistency_bitmask[2] = 0;
3930
3931 /* Avoid a regression due to a buggy server */
3932 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3933 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3934 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3935 sizeof(server->exclcreat_bitmask));
3936
3937 server->acl_bitmask = res.acl_bitmask;
3938 server->fh_expire_type = res.fh_expire_type;
3939 }
3940
3941 return status;
3942 }
3943
3944 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3945 {
3946 struct nfs4_exception exception = {
3947 .interruptible = true,
3948 };
3949 int err;
3950 do {
3951 err = nfs4_handle_exception(server,
3952 _nfs4_server_capabilities(server, fhandle),
3953 &exception);
3954 } while (exception.retry);
3955 return err;
3956 }
3957
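/*
 * Fetch the server's fs_locations for the given file handle so that any
 * additional addresses for the same server can be discovered (trunking).
 */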
3958 static int _nfs4_discover_trunking(struct nfs_server *server,
3959 struct nfs_fh *fhandle)
3960 {
3961 struct nfs4_fs_locations *locations = NULL;
3962 struct page *page;
3963 const struct cred *cred;
3964 struct nfs_client *clp = server->nfs_client;
3965 const struct nfs4_state_maintenance_ops *ops =
3966 clp->cl_mvops->state_renewal_ops;
3967 int status = -ENOMEM;
3968
3969 cred = ops->get_state_renewal_cred(clp);
3970 if (cred == NULL) {
3971 cred = nfs4_get_clid_cred(clp);
3972 if (cred == NULL)
3973 return -ENOKEY;
3974 }
3975
3976 page = alloc_page(GFP_KERNEL);
3977 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3978 if (page == NULL || locations == NULL)
3979 goto out;
3980
3981 status = nfs4_proc_get_locations(server, fhandle, locations, page,
3982 cred);
3983 if (status)
3984 goto out;
3985 out:
3986 if (page)
3987 __free_page(page);
3988 kfree(locations);
3989 return status;
3990 }
3991
3992 static int nfs4_discover_trunking(struct nfs_server *server,
3993 struct nfs_fh *fhandle)
3994 {
3995 struct nfs4_exception exception = {
3996 .interruptible = true,
3997 };
3998 struct nfs_client *clp = server->nfs_client;
3999 int err = 0;
4000
4001 if (!nfs4_has_session(clp))
4002 goto out;
4003 do {
4004 err = nfs4_handle_exception(server,
4005 _nfs4_discover_trunking(server, fhandle),
4006 &exception);
4007 } while (exception.retry);
4008 out:
4009 return err;
4010 }
4011
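/* LOOKUP_ROOT: retrieve the file handle and attributes of the pseudo-fs root */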
4012 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4013 struct nfs_fsinfo *info)
4014 {
4015 u32 bitmask[3];
4016 struct nfs4_lookup_root_arg args = {
4017 .bitmask = bitmask,
4018 };
4019 struct nfs4_lookup_res res = {
4020 .server = server,
4021 .fattr = info->fattr,
4022 .fh = fhandle,
4023 };
4024 struct rpc_message msg = {
4025 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
4026 .rpc_argp = &args,
4027 .rpc_resp = &res,
4028 };
4029
4030 bitmask[0] = nfs4_fattr_bitmap[0];
4031 bitmask[1] = nfs4_fattr_bitmap[1];
4032 /*
4033 * The security label is handled by the upcoming getattr, so leave it out here
4034 */
4035 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
4036
4037 nfs_fattr_init(info->fattr);
4038 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4039 }
4040
4041 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4042 struct nfs_fsinfo *info)
4043 {
4044 struct nfs4_exception exception = {
4045 .interruptible = true,
4046 };
4047 int err;
4048 do {
4049 err = _nfs4_lookup_root(server, fhandle, info);
4050 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
4051 switch (err) {
4052 case 0:
4053 case -NFS4ERR_WRONGSEC:
4054 goto out;
4055 default:
4056 err = nfs4_handle_exception(server, err, &exception);
4057 }
4058 } while (exception.retry);
4059 out:
4060 return err;
4061 }
4062
4063 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4064 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
4065 {
4066 struct rpc_auth_create_args auth_args = {
4067 .pseudoflavor = flavor,
4068 };
4069 struct rpc_auth *auth;
4070
4071 auth = rpcauth_create(&auth_args, server->client);
4072 if (IS_ERR(auth))
4073 return -EACCES;
4074 return nfs4_lookup_root(server, fhandle, info);
4075 }
4076
4077 /*
4078 * Retry pseudoroot lookup with various security flavors. We do this when:
4079 *
4080 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
4081 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
4082 *
4083 * Returns zero on success, or a negative NFS4ERR value, or a
4084 * negative errno value.
4085 */
4086 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4087 struct nfs_fsinfo *info)
4088 {
4089 /* Per 3530bis 15.33.5 */
4090 static const rpc_authflavor_t flav_array[] = {
4091 RPC_AUTH_GSS_KRB5P,
4092 RPC_AUTH_GSS_KRB5I,
4093 RPC_AUTH_GSS_KRB5,
4094 RPC_AUTH_UNIX, /* courtesy */
4095 RPC_AUTH_NULL,
4096 };
4097 int status = -EPERM;
4098 size_t i;
4099
4100 if (server->auth_info.flavor_len > 0) {
4101 /* try each flavor specified by user */
4102 for (i = 0; i < server->auth_info.flavor_len; i++) {
4103 status = nfs4_lookup_root_sec(server, fhandle, info,
4104 server->auth_info.flavors[i]);
4105 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4106 continue;
4107 break;
4108 }
4109 } else {
4110 /* no flavors specified by user, try default list */
4111 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4112 status = nfs4_lookup_root_sec(server, fhandle, info,
4113 flav_array[i]);
4114 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4115 continue;
4116 break;
4117 }
4118 }
4119
4120 /*
4121 * -EACCES could mean that the user doesn't have correct permissions
4122 * to access the mount. It could also mean that we tried to mount
4123 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
4124 * existing mount programs don't handle -EACCES very well so it should
4125 * be mapped to -EPERM instead.
4126 */
4127 if (status == -EACCES)
4128 status = -EPERM;
4129 return status;
4130 }
4131
4132 /**
4133 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4134 * @server: initialized nfs_server handle
4135 * @fhandle: we fill in the pseudo-fs root file handle
4136 * @info: we fill in an FSINFO struct
4137 * @auth_probe: probe the auth flavours
4138 *
4139 * Returns zero on success, or a negative errno.
4140 */
4141 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4142 struct nfs_fsinfo *info,
4143 bool auth_probe)
4144 {
4145 int status = 0;
4146
4147 if (!auth_probe)
4148 status = nfs4_lookup_root(server, fhandle, info);
4149
4150 if (auth_probe || status == NFS4ERR_WRONGSEC)
4151 status = server->nfs_client->cl_mvops->find_root_sec(server,
4152 fhandle, info);
4153
4154 if (status == 0)
4155 status = nfs4_server_capabilities(server, fhandle);
4156 if (status == 0)
4157 status = nfs4_do_fsinfo(server, fhandle, info);
4158
4159 return nfs4_map_errors(status);
4160 }
4161
4162 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4163 struct nfs_fsinfo *info)
4164 {
4165 int error;
4166 struct nfs_fattr *fattr = info->fattr;
4167 struct nfs4_label *label = fattr->label;
4168
4169 error = nfs4_server_capabilities(server, mntfh);
4170 if (error < 0) {
4171 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4172 return error;
4173 }
4174
4175 error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL);
4176 if (error < 0) {
4177 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4178 goto out;
4179 }
4180
4181 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4182 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4183 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4184
4185 out:
4186 return error;
4187 }
4188
4189 /*
4190 * Get locations and (maybe) other attributes of a referral.
4191 * Note that we'll actually follow the referral later when
4192 * we detect fsid mismatch in inode revalidation
4193 */
4194 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4195 const struct qstr *name, struct nfs_fattr *fattr,
4196 struct nfs_fh *fhandle)
4197 {
4198 int status = -ENOMEM;
4199 struct page *page = NULL;
4200 struct nfs4_fs_locations *locations = NULL;
4201
4202 page = alloc_page(GFP_KERNEL);
4203 if (page == NULL)
4204 goto out;
4205 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4206 if (locations == NULL)
4207 goto out;
4208
4209 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4210 if (status != 0)
4211 goto out;
4212
4213 /*
4214 * If the fsid didn't change, this is a migration event, not a
4215 * referral. Cause us to drop into the exception handler, which
4216 * will kick off migration recovery.
4217 */
4218 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
4219 dprintk("%s: server did not return a different fsid for"
4220 " a referral at %s\n", __func__, name->name);
4221 status = -NFS4ERR_MOVED;
4222 goto out;
4223 }
4224 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4225 nfs_fixup_referral_attributes(&locations->fattr);
4226
4227 /* replace the lookup nfs_fattr with the locations nfs_fattr */
4228 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
4229 memset(fhandle, 0, sizeof(struct nfs_fh));
4230 out:
4231 if (page)
4232 __free_page(page);
4233 kfree(locations);
4234 return status;
4235 }
4236
4237 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4238 struct nfs_fattr *fattr, struct nfs4_label *label,
4239 struct inode *inode)
4240 {
4241 __u32 bitmask[NFS4_BITMASK_SZ];
4242 struct nfs4_getattr_arg args = {
4243 .fh = fhandle,
4244 .bitmask = bitmask,
4245 };
4246 struct nfs4_getattr_res res = {
4247 .fattr = fattr,
4248 .label = label,
4249 .server = server,
4250 };
4251 struct rpc_message msg = {
4252 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4253 .rpc_argp = &args,
4254 .rpc_resp = &res,
4255 };
4256 unsigned short task_flags = 0;
4257
4258 if (nfs4_has_session(server->nfs_client))
4259 task_flags = RPC_TASK_MOVEABLE;
4260
4261 /* Is this an attribute revalidation, subject to softreval? */
4262 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4263 task_flags |= RPC_TASK_TIMEOUT;
4264
4265 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode, 0);
4266 nfs_fattr_init(fattr);
4267 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4268 return nfs4_do_call_sync(server->client, server, &msg,
4269 &args.seq_args, &res.seq_res, task_flags);
4270 }
4271
4272 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4273 struct nfs_fattr *fattr, struct nfs4_label *label,
4274 struct inode *inode)
4275 {
4276 struct nfs4_exception exception = {
4277 .interruptible = true,
4278 };
4279 int err;
4280 do {
4281 err = _nfs4_proc_getattr(server, fhandle, fattr, label, inode);
4282 trace_nfs4_getattr(server, fhandle, fattr, err);
4283 err = nfs4_handle_exception(server, err,
4284 &exception);
4285 } while (exception.retry);
4286 return err;
4287 }
4288
4289 /*
4290 * The file is not closed if it is opened due to a request to change
4291 * the size of the file. The open call will not be needed once the
4292 * VFS layer lookup-intents are implemented.
4293 *
4294 * Close is called when the inode is destroyed.
4295 * If we haven't opened the file for O_WRONLY, we
4296 * need to do so in the size_change case to obtain a stateid.
4297 *
4298 * Got race?
4299 * Because OPEN is always done by name in NFSv4, it is
4300 * possible that we opened a different file by the same
4301 * name. We can recognize this race condition, but we
4302 * can't do anything about it besides returning an error.
4303 *
4304 * This will be fixed with VFS changes (lookup-intent).
4305 */
4306 static int
4307 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4308 struct iattr *sattr)
4309 {
4310 struct inode *inode = d_inode(dentry);
4311 const struct cred *cred = NULL;
4312 struct nfs_open_context *ctx = NULL;
4313 struct nfs4_label *label = NULL;
4314 int status;
4315
4316 if (pnfs_ld_layoutret_on_setattr(inode) &&
4317 sattr->ia_valid & ATTR_SIZE &&
4318 sattr->ia_size < i_size_read(inode))
4319 pnfs_commit_and_return_layout(inode);
4320
4321 nfs_fattr_init(fattr);
4322
4323 /* Deal with open(O_TRUNC) */
4324 if (sattr->ia_valid & ATTR_OPEN)
4325 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4326
4327 /* Optimization: if the end result is no change, don't RPC */
4328 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4329 return 0;
4330
4331 /* Search for an existing open(O_WRITE) file */
4332 if (sattr->ia_valid & ATTR_FILE) {
4333
4334 ctx = nfs_file_open_context(sattr->ia_file);
4335 if (ctx)
4336 cred = ctx->cred;
4337 }
4338
4339 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4340 if (IS_ERR(label))
4341 return PTR_ERR(label);
4342
4343 /* Return any delegations if we're going to change ACLs */
4344 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4345 nfs4_inode_make_writeable(inode);
4346
4347 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
4348 if (status == 0) {
4349 nfs_setattr_update_inode(inode, sattr, fattr);
4350 nfs_setsecurity(inode, fattr, label);
4351 }
4352 nfs4_label_free(label);
4353 return status;
4354 }
4355
4356 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4357 struct dentry *dentry, struct nfs_fh *fhandle,
4358 struct nfs_fattr *fattr, struct nfs4_label *label)
4359 {
4360 struct nfs_server *server = NFS_SERVER(dir);
4361 int status;
4362 struct nfs4_lookup_arg args = {
4363 .bitmask = server->attr_bitmask,
4364 .dir_fh = NFS_FH(dir),
4365 .name = &dentry->d_name,
4366 };
4367 struct nfs4_lookup_res res = {
4368 .server = server,
4369 .fattr = fattr,
4370 .label = label,
4371 .fh = fhandle,
4372 };
4373 struct rpc_message msg = {
4374 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4375 .rpc_argp = &args,
4376 .rpc_resp = &res,
4377 };
4378 unsigned short task_flags = 0;
4379
4380 if (server->nfs_client->cl_minorversion)
4381 task_flags = RPC_TASK_MOVEABLE;
4382
4383 /* Is this an attribute revalidation, subject to softreval? */
4384 if (nfs_lookup_is_soft_revalidate(dentry))
4385 task_flags |= RPC_TASK_TIMEOUT;
4386
4387 args.bitmask = nfs4_bitmask(server, label);
4388
4389 nfs_fattr_init(fattr);
4390
4391 dprintk("NFS call lookup %pd2\n", dentry);
4392 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4393 status = nfs4_do_call_sync(clnt, server, &msg,
4394 &args.seq_args, &res.seq_res, task_flags);
4395 dprintk("NFS reply lookup: %d\n", status);
4396 return status;
4397 }
4398
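/*
 * Fill in minimal directory attributes for an object reached across a
 * security boundary (after SECINFO negotiation), marking it as a
 * mountpoint.
 */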
4399 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4400 {
4401 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4402 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4403 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4404 fattr->nlink = 2;
4405 }
4406
4407 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4408 struct dentry *dentry, struct nfs_fh *fhandle,
4409 struct nfs_fattr *fattr, struct nfs4_label *label)
4410 {
4411 struct nfs4_exception exception = {
4412 .interruptible = true,
4413 };
4414 struct rpc_clnt *client = *clnt;
4415 const struct qstr *name = &dentry->d_name;
4416 int err;
4417 do {
4418 err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label);
4419 trace_nfs4_lookup(dir, name, err);
4420 switch (err) {
4421 case -NFS4ERR_BADNAME:
4422 err = -ENOENT;
4423 goto out;
4424 case -NFS4ERR_MOVED:
4425 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4426 if (err == -NFS4ERR_MOVED)
4427 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4428 goto out;
4429 case -NFS4ERR_WRONGSEC:
4430 err = -EPERM;
4431 if (client != *clnt)
4432 goto out;
4433 client = nfs4_negotiate_security(client, dir, name);
4434 if (IS_ERR(client))
4435 return PTR_ERR(client);
4436
4437 exception.retry = 1;
4438 break;
4439 default:
4440 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4441 }
4442 } while (exception.retry);
4443
4444 out:
4445 if (err == 0)
4446 *clnt = client;
4447 else if (client != *clnt)
4448 rpc_shutdown_client(client);
4449
4450 return err;
4451 }
4452
4453 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
4454 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4455 struct nfs4_label *label)
4456 {
4457 int status;
4458 struct rpc_clnt *client = NFS_CLIENT(dir);
4459
4460 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
4461 if (client != NFS_CLIENT(dir)) {
4462 rpc_shutdown_client(client);
4463 nfs_fixup_secinfo_attributes(fattr);
4464 }
4465 return status;
4466 }
4467
4468 struct rpc_clnt *
4469 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4470 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4471 {
4472 struct rpc_clnt *client = NFS_CLIENT(dir);
4473 int status;
4474
4475 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL);
4476 if (status < 0)
4477 return ERR_PTR(status);
4478 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4479 }
4480
4481 static int _nfs4_proc_lookupp(struct inode *inode,
4482 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4483 struct nfs4_label *label)
4484 {
4485 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4486 struct nfs_server *server = NFS_SERVER(inode);
4487 int status;
4488 struct nfs4_lookupp_arg args = {
4489 .bitmask = server->attr_bitmask,
4490 .fh = NFS_FH(inode),
4491 };
4492 struct nfs4_lookupp_res res = {
4493 .server = server,
4494 .fattr = fattr,
4495 .label = label,
4496 .fh = fhandle,
4497 };
4498 struct rpc_message msg = {
4499 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4500 .rpc_argp = &args,
4501 .rpc_resp = &res,
4502 };
4503 unsigned short task_flags = 0;
4504
4505 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4506 task_flags |= RPC_TASK_TIMEOUT;
4507
4508 args.bitmask = nfs4_bitmask(server, label);
4509
4510 nfs_fattr_init(fattr);
4511
4512 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4513 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4514 &res.seq_res, task_flags);
4515 dprintk("NFS reply lookupp: %d\n", status);
4516 return status;
4517 }
4518
4519 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4520 struct nfs_fattr *fattr, struct nfs4_label *label)
4521 {
4522 struct nfs4_exception exception = {
4523 .interruptible = true,
4524 };
4525 int err;
4526 do {
4527 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4528 trace_nfs4_lookupp(inode, err);
4529 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4530 &exception);
4531 } while (exception.retry);
4532 return err;
4533 }
4534
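/*
 * ACCESS: ask the server which of the requested access bits are
 * allowed, piggy-backing cache consistency attributes unless we hold a
 * read delegation.
 */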
4535 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4536 {
4537 struct nfs_server *server = NFS_SERVER(inode);
4538 struct nfs4_accessargs args = {
4539 .fh = NFS_FH(inode),
4540 .access = entry->mask,
4541 };
4542 struct nfs4_accessres res = {
4543 .server = server,
4544 };
4545 struct rpc_message msg = {
4546 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4547 .rpc_argp = &args,
4548 .rpc_resp = &res,
4549 .rpc_cred = entry->cred,
4550 };
4551 int status = 0;
4552
4553 if (!nfs4_have_delegation(inode, FMODE_READ)) {
4554 res.fattr = nfs_alloc_fattr();
4555 if (res.fattr == NULL)
4556 return -ENOMEM;
4557 args.bitmask = server->cache_consistency_bitmask;
4558 }
4559 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4560 if (!status) {
4561 nfs_access_set_mask(entry, res.access);
4562 if (res.fattr)
4563 nfs_refresh_inode(inode, res.fattr);
4564 }
4565 nfs_free_fattr(res.fattr);
4566 return status;
4567 }
4568
4569 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4570 {
4571 struct nfs4_exception exception = {
4572 .interruptible = true,
4573 };
4574 int err;
4575 do {
4576 err = _nfs4_proc_access(inode, entry);
4577 trace_nfs4_access(inode, err);
4578 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4579 &exception);
4580 } while (exception.retry);
4581 return err;
4582 }
4583
4584 /*
4585 * TODO: For the time being, we don't try to get any attributes
4586 * along with any of the zero-copy operations READ, READDIR,
4587 * READLINK, WRITE.
4588 *
4589 * In the case of the first three, we want to put the GETATTR
4590 * after the read-type operation -- this is because it is hard
4591 * to predict the length of a GETATTR response in v4, and thus hard
4592 * to align the READ data correctly. This means that the GETATTR
4593 * may end up partially falling into the page cache, and we should
4594 * shift it into the 'tail' of the xdr_buf before processing.
4595 * To do this efficiently, we need to know the total length
4596 * of data received, which doesn't seem to be available outside
4597 * of the RPC layer.
4598 *
4599 * In the case of WRITE, we also want to put the GETATTR after
4600 * the operation -- in this case because we want to make sure
4601 * we get the post-operation mtime and size.
4602 *
4603 * Both of these changes to the XDR layer would in fact be quite
4604 * minor, but I decided to leave them for a subsequent patch.
4605 */
4606 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4607 unsigned int pgbase, unsigned int pglen)
4608 {
4609 struct nfs4_readlink args = {
4610 .fh = NFS_FH(inode),
4611 .pgbase = pgbase,
4612 .pglen = pglen,
4613 .pages = &page,
4614 };
4615 struct nfs4_readlink_res res;
4616 struct rpc_message msg = {
4617 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4618 .rpc_argp = &args,
4619 .rpc_resp = &res,
4620 };
4621
4622 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4623 }
4624
4625 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4626 unsigned int pgbase, unsigned int pglen)
4627 {
4628 struct nfs4_exception exception = {
4629 .interruptible = true,
4630 };
4631 int err;
4632 do {
4633 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4634 trace_nfs4_readlink(inode, err);
4635 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4636 &exception);
4637 } while (exception.retry);
4638 return err;
4639 }
4640
4641 /*
4642 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4643 */
4644 static int
4645 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4646 int flags)
4647 {
4648 struct nfs_server *server = NFS_SERVER(dir);
4649 struct nfs4_label l, *ilabel = NULL;
4650 struct nfs_open_context *ctx;
4651 struct nfs4_state *state;
4652 int status = 0;
4653
4654 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4655 if (IS_ERR(ctx))
4656 return PTR_ERR(ctx);
4657
4658 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4659
4660 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4661 sattr->ia_mode &= ~current_umask();
4662 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4663 if (IS_ERR(state)) {
4664 status = PTR_ERR(state);
4665 goto out;
4666 }
4667 out:
4668 nfs4_label_release_security(ilabel);
4669 put_nfs_open_context(ctx);
4670 return status;
4671 }
4672
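/*
 * REMOVE: unlink the entry from the directory and update the directory's
 * change attribute; removing a subdirectory also drops the parent's link
 * count.
 */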
4673 static int
4674 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4675 {
4676 struct nfs_server *server = NFS_SERVER(dir);
4677 struct nfs_removeargs args = {
4678 .fh = NFS_FH(dir),
4679 .name = *name,
4680 };
4681 struct nfs_removeres res = {
4682 .server = server,
4683 };
4684 struct rpc_message msg = {
4685 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4686 .rpc_argp = &args,
4687 .rpc_resp = &res,
4688 };
4689 unsigned long timestamp = jiffies;
4690 int status;
4691
4692 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4693 if (status == 0) {
4694 spin_lock(&dir->i_lock);
4695 /* Removing a directory decrements nlink in the parent */
4696 if (ftype == NF4DIR && dir->i_nlink > 2)
4697 nfs4_dec_nlink_locked(dir);
4698 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4699 NFS_INO_INVALID_DATA);
4700 spin_unlock(&dir->i_lock);
4701 }
4702 return status;
4703 }
4704
4705 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4706 {
4707 struct nfs4_exception exception = {
4708 .interruptible = true,
4709 };
4710 struct inode *inode = d_inode(dentry);
4711 int err;
4712
4713 if (inode) {
4714 if (inode->i_nlink == 1)
4715 nfs4_inode_return_delegation(inode);
4716 else
4717 nfs4_inode_make_writeable(inode);
4718 }
4719 do {
4720 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4721 trace_nfs4_remove(dir, &dentry->d_name, err);
4722 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4723 &exception);
4724 } while (exception.retry);
4725 return err;
4726 }
4727
4728 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4729 {
4730 struct nfs4_exception exception = {
4731 .interruptible = true,
4732 };
4733 int err;
4734
4735 do {
4736 err = _nfs4_proc_remove(dir, name, NF4DIR);
4737 trace_nfs4_remove(dir, name, err);
4738 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4739 &exception);
4740 } while (exception.retry);
4741 return err;
4742 }
4743
4744 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4745 struct dentry *dentry,
4746 struct inode *inode)
4747 {
4748 struct nfs_removeargs *args = msg->rpc_argp;
4749 struct nfs_removeres *res = msg->rpc_resp;
4750
4751 res->server = NFS_SB(dentry->d_sb);
4752 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4753 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4754
4755 nfs_fattr_init(res->dir_attr);
4756
4757 if (inode)
4758 nfs4_inode_return_delegation(inode);
4759 }
4760
4761 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4762 {
4763 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4764 &data->args.seq_args,
4765 &data->res.seq_res,
4766 task);
4767 }
4768
4769 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4770 {
4771 struct nfs_unlinkdata *data = task->tk_calldata;
4772 struct nfs_removeres *res = &data->res;
4773
4774 if (!nfs4_sequence_done(task, &res->seq_res))
4775 return 0;
4776 if (nfs4_async_handle_error(task, res->server, NULL,
4777 &data->timeout) == -EAGAIN)
4778 return 0;
4779 if (task->tk_status == 0)
4780 nfs4_update_changeattr(dir, &res->cinfo,
4781 res->dir_attr->time_start,
4782 NFS_INO_INVALID_DATA);
4783 return 1;
4784 }
4785
4786 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4787 struct dentry *old_dentry,
4788 struct dentry *new_dentry)
4789 {
4790 struct nfs_renameargs *arg = msg->rpc_argp;
4791 struct nfs_renameres *res = msg->rpc_resp;
4792 struct inode *old_inode = d_inode(old_dentry);
4793 struct inode *new_inode = d_inode(new_dentry);
4794
4795 if (old_inode)
4796 nfs4_inode_make_writeable(old_inode);
4797 if (new_inode)
4798 nfs4_inode_return_delegation(new_inode);
4799 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4800 res->server = NFS_SB(old_dentry->d_sb);
4801 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4802 }
4803
4804 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4805 {
4806 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4807 &data->args.seq_args,
4808 &data->res.seq_res,
4809 task);
4810 }
4811
4812 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4813 struct inode *new_dir)
4814 {
4815 struct nfs_renamedata *data = task->tk_calldata;
4816 struct nfs_renameres *res = &data->res;
4817
4818 if (!nfs4_sequence_done(task, &res->seq_res))
4819 return 0;
4820 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4821 return 0;
4822
4823 if (task->tk_status == 0) {
4824 if (new_dir != old_dir) {
4825 /* Note: If we moved a directory, nlink will change */
4826 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4827 res->old_fattr->time_start,
4828 NFS_INO_INVALID_NLINK |
4829 NFS_INO_INVALID_DATA);
4830 nfs4_update_changeattr(new_dir, &res->new_cinfo,
4831 res->new_fattr->time_start,
4832 NFS_INO_INVALID_NLINK |
4833 NFS_INO_INVALID_DATA);
4834 } else
4835 nfs4_update_changeattr(old_dir, &res->old_cinfo,
4836 res->old_fattr->time_start,
4837 NFS_INO_INVALID_DATA);
4838 }
4839 return 1;
4840 }
4841
4842 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4843 {
4844 struct nfs_server *server = NFS_SERVER(inode);
4845 __u32 bitmask[NFS4_BITMASK_SZ];
4846 struct nfs4_link_arg arg = {
4847 .fh = NFS_FH(inode),
4848 .dir_fh = NFS_FH(dir),
4849 .name = name,
4850 .bitmask = bitmask,
4851 };
4852 struct nfs4_link_res res = {
4853 .server = server,
4854 .label = NULL,
4855 };
4856 struct rpc_message msg = {
4857 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4858 .rpc_argp = &arg,
4859 .rpc_resp = &res,
4860 };
4861 int status = -ENOMEM;
4862
4863 res.fattr = nfs_alloc_fattr();
4864 if (res.fattr == NULL)
4865 goto out;
4866
4867 res.label = nfs4_label_alloc(server, GFP_KERNEL);
4868 if (IS_ERR(res.label)) {
4869 status = PTR_ERR(res.label);
4870 goto out;
4871 }
4872
4873 nfs4_inode_make_writeable(inode);
4874 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.label), inode,
4875 NFS_INO_INVALID_CHANGE);
4876 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4877 if (!status) {
4878 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
4879 NFS_INO_INVALID_DATA);
4880 nfs4_inc_nlink(inode);
4881 status = nfs_post_op_update_inode(inode, res.fattr);
4882 if (!status)
4883 nfs_setsecurity(inode, res.fattr, res.label);
4884 }
4885
4886
4887 nfs4_label_free(res.label);
4888
4889 out:
4890 nfs_free_fattr(res.fattr);
4891 return status;
4892 }
4893
4894 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4895 {
4896 struct nfs4_exception exception = {
4897 .interruptible = true,
4898 };
4899 int err;
4900 do {
4901 err = nfs4_handle_exception(NFS_SERVER(inode),
4902 _nfs4_proc_link(inode, dir, name),
4903 &exception);
4904 } while (exception.retry);
4905 return err;
4906 }
4907
4908 struct nfs4_createdata {
4909 struct rpc_message msg;
4910 struct nfs4_create_arg arg;
4911 struct nfs4_create_res res;
4912 struct nfs_fh fh;
4913 struct nfs_fattr fattr;
4914 struct nfs4_label *label;
4915 };
4916
4917 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4918 const struct qstr *name, struct iattr *sattr, u32 ftype)
4919 {
4920 struct nfs4_createdata *data;
4921
4922 data = kzalloc(sizeof(*data), GFP_KERNEL);
4923 if (data != NULL) {
4924 struct nfs_server *server = NFS_SERVER(dir);
4925
4926 data->label = nfs4_label_alloc(server, GFP_KERNEL);
4927 if (IS_ERR(data->label))
4928 goto out_free;
4929
4930 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4931 data->msg.rpc_argp = &data->arg;
4932 data->msg.rpc_resp = &data->res;
4933 data->arg.dir_fh = NFS_FH(dir);
4934 data->arg.server = server;
4935 data->arg.name = name;
4936 data->arg.attrs = sattr;
4937 data->arg.ftype = ftype;
4938 data->arg.bitmask = nfs4_bitmask(server, data->label);
4939 data->arg.umask = current_umask();
4940 data->res.server = server;
4941 data->res.fh = &data->fh;
4942 data->res.fattr = &data->fattr;
4943 data->res.label = data->label;
4944 nfs_fattr_init(data->res.fattr);
4945 }
4946 return data;
4947 out_free:
4948 kfree(data);
4949 return NULL;
4950 }
4951
4952 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4953 {
4954 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4955 &data->arg.seq_args, &data->res.seq_res, 1);
4956 if (status == 0) {
4957 spin_lock(&dir->i_lock);
4958 /* Creating a directory bumps nlink in the parent */
4959 if (data->arg.ftype == NF4DIR)
4960 nfs4_inc_nlink_locked(dir);
4961 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
4962 data->res.fattr->time_start,
4963 NFS_INO_INVALID_DATA);
4964 spin_unlock(&dir->i_lock);
4965 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4966 }
4967 return status;
4968 }
4969
4970 static void nfs4_free_createdata(struct nfs4_createdata *data)
4971 {
4972 nfs4_label_free(data->label);
4973 kfree(data);
4974 }
4975
4976 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4977 struct page *page, unsigned int len, struct iattr *sattr,
4978 struct nfs4_label *label)
4979 {
4980 struct nfs4_createdata *data;
4981 int status = -ENAMETOOLONG;
4982
4983 if (len > NFS4_MAXPATHLEN)
4984 goto out;
4985
4986 status = -ENOMEM;
4987 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4988 if (data == NULL)
4989 goto out;
4990
4991 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4992 data->arg.u.symlink.pages = &page;
4993 data->arg.u.symlink.len = len;
4994 data->arg.label = label;
4995
4996 status = nfs4_do_create(dir, dentry, data);
4997
4998 nfs4_free_createdata(data);
4999 out:
5000 return status;
5001 }
5002
5003 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5004 struct page *page, unsigned int len, struct iattr *sattr)
5005 {
5006 struct nfs4_exception exception = {
5007 .interruptible = true,
5008 };
5009 struct nfs4_label l, *label = NULL;
5010 int err;
5011
5012 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5013
5014 do {
5015 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
5016 trace_nfs4_symlink(dir, &dentry->d_name, err);
5017 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5018 &exception);
5019 } while (exception.retry);
5020
5021 nfs4_label_release_security(label);
5022 return err;
5023 }
5024
5025 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5026 struct iattr *sattr, struct nfs4_label *label)
5027 {
5028 struct nfs4_createdata *data;
5029 int status = -ENOMEM;
5030
5031 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
5032 if (data == NULL)
5033 goto out;
5034
5035 data->arg.label = label;
5036 status = nfs4_do_create(dir, dentry, data);
5037
5038 nfs4_free_createdata(data);
5039 out:
5040 return status;
5041 }
5042
5043 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5044 struct iattr *sattr)
5045 {
5046 struct nfs_server *server = NFS_SERVER(dir);
5047 struct nfs4_exception exception = {
5048 .interruptible = true,
5049 };
5050 struct nfs4_label l, *label = NULL;
5051 int err;
5052
5053 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5054
5055 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5056 sattr->ia_mode &= ~current_umask();
5057 do {
5058 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
5059 trace_nfs4_mkdir(dir, &dentry->d_name, err);
5060 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5061 &exception);
5062 } while (exception.retry);
5063 nfs4_label_release_security(label);
5064
5065 return err;
5066 }
5067
5068 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
5069 struct nfs_readdir_res *nr_res)
5070 {
5071 struct inode *dir = d_inode(nr_arg->dentry);
5072 struct nfs_server *server = NFS_SERVER(dir);
5073 struct nfs4_readdir_arg args = {
5074 .fh = NFS_FH(dir),
5075 .pages = nr_arg->pages,
5076 .pgbase = 0,
5077 .count = nr_arg->page_len,
5078 .plus = nr_arg->plus,
5079 };
5080 struct nfs4_readdir_res res;
5081 struct rpc_message msg = {
5082 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
5083 .rpc_argp = &args,
5084 .rpc_resp = &res,
5085 .rpc_cred = nr_arg->cred,
5086 };
5087 int status;
5088
5089 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
5090 nr_arg->dentry, (unsigned long long)nr_arg->cookie);
5091 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
5092 args.bitmask = server->attr_bitmask_nl;
5093 else
5094 args.bitmask = server->attr_bitmask;
5095
5096 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
5097 res.pgbase = args.pgbase;
5098 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5099 &res.seq_res, 0);
5100 if (status >= 0) {
5101 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5102 status += args.pgbase;
5103 }
5104
5105 nfs_invalidate_atime(dir);
5106
5107 dprintk("%s: returns %d\n", __func__, status);
5108 return status;
5109 }
5110
5111 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5112 struct nfs_readdir_res *res)
5113 {
5114 struct nfs4_exception exception = {
5115 .interruptible = true,
5116 };
5117 int err;
5118 do {
5119 err = _nfs4_proc_readdir(arg, res);
5120 trace_nfs4_readdir(d_inode(arg->dentry), err);
5121 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5122 err, &exception);
5123 } while (exception.retry);
5124 return err;
5125 }
5126
5127 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5128 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5129 {
5130 struct nfs4_createdata *data;
5131 int mode = sattr->ia_mode;
5132 int status = -ENOMEM;
5133
5134 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5135 if (data == NULL)
5136 goto out;
5137
5138 if (S_ISFIFO(mode))
5139 data->arg.ftype = NF4FIFO;
5140 else if (S_ISBLK(mode)) {
5141 data->arg.ftype = NF4BLK;
5142 data->arg.u.device.specdata1 = MAJOR(rdev);
5143 data->arg.u.device.specdata2 = MINOR(rdev);
5144 }
5145 else if (S_ISCHR(mode)) {
5146 data->arg.ftype = NF4CHR;
5147 data->arg.u.device.specdata1 = MAJOR(rdev);
5148 data->arg.u.device.specdata2 = MINOR(rdev);
5149 } else if (!S_ISSOCK(mode)) {
5150 status = -EINVAL;
5151 goto out_free;
5152 }
5153
5154 data->arg.label = label;
5155 status = nfs4_do_create(dir, dentry, data);
5156 out_free:
5157 nfs4_free_createdata(data);
5158 out:
5159 return status;
5160 }
5161
5162 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5163 struct iattr *sattr, dev_t rdev)
5164 {
5165 struct nfs_server *server = NFS_SERVER(dir);
5166 struct nfs4_exception exception = {
5167 .interruptible = true,
5168 };
5169 struct nfs4_label l, *label = NULL;
5170 int err;
5171
5172 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5173
5174 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5175 sattr->ia_mode &= ~current_umask();
5176 do {
5177 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5178 trace_nfs4_mknod(dir, &dentry->d_name, err);
5179 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5180 &exception);
5181 } while (exception.retry);
5182
5183 nfs4_label_release_security(label);
5184
5185 return err;
5186 }
5187
5188 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5189 struct nfs_fsstat *fsstat)
5190 {
5191 struct nfs4_statfs_arg args = {
5192 .fh = fhandle,
5193 .bitmask = server->attr_bitmask,
5194 };
5195 struct nfs4_statfs_res res = {
5196 .fsstat = fsstat,
5197 };
5198 struct rpc_message msg = {
5199 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5200 .rpc_argp = &args,
5201 .rpc_resp = &res,
5202 };
5203
5204 nfs_fattr_init(fsstat->fattr);
5205 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5206 }
5207
5208 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5209 {
5210 struct nfs4_exception exception = {
5211 .interruptible = true,
5212 };
5213 int err;
5214 do {
5215 err = nfs4_handle_exception(server,
5216 _nfs4_proc_statfs(server, fhandle, fsstat),
5217 &exception);
5218 } while (exception.retry);
5219 return err;
5220 }
5221
5222 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5223 struct nfs_fsinfo *fsinfo)
5224 {
5225 struct nfs4_fsinfo_arg args = {
5226 .fh = fhandle,
5227 .bitmask = server->attr_bitmask,
5228 };
5229 struct nfs4_fsinfo_res res = {
5230 .fsinfo = fsinfo,
5231 };
5232 struct rpc_message msg = {
5233 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5234 .rpc_argp = &args,
5235 .rpc_resp = &res,
5236 };
5237
5238 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5239 }
5240
5241 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5242 {
5243 struct nfs4_exception exception = {
5244 .interruptible = true,
5245 };
5246 int err;
5247
5248 do {
5249 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5250 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5251 if (err == 0) {
5252 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5253 break;
5254 }
5255 err = nfs4_handle_exception(server, err, &exception);
5256 } while (exception.retry);
5257 return err;
5258 }
5259
5260 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5261 {
5262 int error;
5263
5264 nfs_fattr_init(fsinfo->fattr);
5265 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5266 if (error == 0) {
5267 /* block layout checks this! */
5268 server->pnfs_blksize = fsinfo->blksize;
5269 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5270 }
5271
5272 return error;
5273 }
5274
5275 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5276 struct nfs_pathconf *pathconf)
5277 {
5278 struct nfs4_pathconf_arg args = {
5279 .fh = fhandle,
5280 .bitmask = server->attr_bitmask,
5281 };
5282 struct nfs4_pathconf_res res = {
5283 .pathconf = pathconf,
5284 };
5285 struct rpc_message msg = {
5286 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5287 .rpc_argp = &args,
5288 .rpc_resp = &res,
5289 };
5290
5291 /* None of the pathconf attributes are mandatory to implement */
5292 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5293 memset(pathconf, 0, sizeof(*pathconf));
5294 return 0;
5295 }
5296
5297 nfs_fattr_init(pathconf->fattr);
5298 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5299 }
5300
5301 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5302 struct nfs_pathconf *pathconf)
5303 {
5304 struct nfs4_exception exception = {
5305 .interruptible = true,
5306 };
5307 int err;
5308
5309 do {
5310 err = nfs4_handle_exception(server,
5311 _nfs4_proc_pathconf(server, fhandle, pathconf),
5312 &exception);
5313 } while (exception.retry);
5314 return err;
5315 }
5316
5317 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5318 const struct nfs_open_context *ctx,
5319 const struct nfs_lock_context *l_ctx,
5320 fmode_t fmode)
5321 {
5322 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5323 }
5324 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5325
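/*
 * Check whether the given stateid still matches the stateid we would
 * currently select for this open/lock context and mode.
 */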
5326 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5327 const struct nfs_open_context *ctx,
5328 const struct nfs_lock_context *l_ctx,
5329 fmode_t fmode)
5330 {
5331 nfs4_stateid _current_stateid;
5332
5333 /* If the current stateid represents a lost lock, then exit */
5334 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5335 return true;
5336 return nfs4_stateid_match(stateid, &_current_stateid);
5337 }
5338
5339 static bool nfs4_error_stateid_expired(int err)
5340 {
5341 switch (err) {
5342 case -NFS4ERR_DELEG_REVOKED:
5343 case -NFS4ERR_ADMIN_REVOKED:
5344 case -NFS4ERR_BAD_STATEID:
5345 case -NFS4ERR_STALE_STATEID:
5346 case -NFS4ERR_OLD_STATEID:
5347 case -NFS4ERR_OPENMODE:
5348 case -NFS4ERR_EXPIRED:
5349 return true;
5350 }
5351 return false;
5352 }
5353
5354 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5355 {
5356 struct nfs_server *server = NFS_SERVER(hdr->inode);
5357
5358 trace_nfs4_read(hdr, task->tk_status);
5359 if (task->tk_status < 0) {
5360 struct nfs4_exception exception = {
5361 .inode = hdr->inode,
5362 .state = hdr->args.context->state,
5363 .stateid = &hdr->args.stateid,
5364 };
5365 task->tk_status = nfs4_async_handle_exception(task,
5366 server, task->tk_status, &exception);
5367 if (exception.retry) {
5368 rpc_restart_call_prepare(task);
5369 return -EAGAIN;
5370 }
5371 }
5372
5373 if (task->tk_status > 0)
5374 renew_lease(server, hdr->timestamp);
5375 return 0;
5376 }
5377
5378 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5379 struct nfs_pgio_args *args)
5380 {
5381
5382 if (!nfs4_error_stateid_expired(task->tk_status) ||
5383 nfs4_stateid_is_current(&args->stateid,
5384 args->context,
5385 args->lock_context,
5386 FMODE_READ))
5387 return false;
5388 rpc_restart_call_prepare(task);
5389 return true;
5390 }
5391
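/*
 * If the server rejected READ_PLUS with ENOTSUPP, clear the capability
 * and restart the call as a plain READ.
 */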
5392 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5393 struct nfs_pgio_header *hdr)
5394 {
5395 struct nfs_server *server = NFS_SERVER(hdr->inode);
5396 struct rpc_message *msg = &task->tk_msg;
5397
5398 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5399 server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
5400 server->caps &= ~NFS_CAP_READ_PLUS;
5401 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5402 rpc_restart_call_prepare(task);
5403 return true;
5404 }
5405 return false;
5406 }
5407
5408 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5409 {
5410 dprintk("--> %s\n", __func__);
5411
5412 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5413 return -EAGAIN;
5414 if (nfs4_read_stateid_changed(task, &hdr->args))
5415 return -EAGAIN;
5416 if (nfs4_read_plus_not_supported(task, hdr))
5417 return -EAGAIN;
5418 if (task->tk_status > 0)
5419 nfs_invalidate_atime(hdr->inode);
5420 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5421 nfs4_read_done_cb(task, hdr);
5422 }
5423
5424 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5425 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5426 struct rpc_message *msg)
5427 {
5428 /* Note: We don't use READ_PLUS with pNFS yet */
5429 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
5430 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5431 }
5432 #else
5433 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5434 struct rpc_message *msg)
5435 {
5436 }
5437 #endif /* CONFIG_NFS_V4_2 */
5438
5439 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5440 struct rpc_message *msg)
5441 {
5442 hdr->timestamp = jiffies;
5443 if (!hdr->pgio_done_cb)
5444 hdr->pgio_done_cb = nfs4_read_done_cb;
5445 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5446 nfs42_read_plus_support(hdr, msg);
5447 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5448 }
5449
5450 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5451 struct nfs_pgio_header *hdr)
5452 {
5453 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5454 &hdr->args.seq_args,
5455 &hdr->res.seq_res,
5456 task))
5457 return 0;
5458 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5459 hdr->args.lock_context,
5460 hdr->rw_mode) == -EIO)
5461 return -EIO;
5462 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5463 return -EIO;
5464 return 0;
5465 }
5466
5467 static int nfs4_write_done_cb(struct rpc_task *task,
5468 struct nfs_pgio_header *hdr)
5469 {
5470 struct inode *inode = hdr->inode;
5471
5472 trace_nfs4_write(hdr, task->tk_status);
5473 if (task->tk_status < 0) {
5474 struct nfs4_exception exception = {
5475 .inode = hdr->inode,
5476 .state = hdr->args.context->state,
5477 .stateid = &hdr->args.stateid,
5478 };
5479 task->tk_status = nfs4_async_handle_exception(task,
5480 NFS_SERVER(inode), task->tk_status,
5481 &exception);
5482 if (exception.retry) {
5483 rpc_restart_call_prepare(task);
5484 return -EAGAIN;
5485 }
5486 }
5487 if (task->tk_status >= 0) {
5488 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5489 nfs_writeback_update_inode(hdr);
5490 }
5491 return 0;
5492 }
5493
5494 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5495 struct nfs_pgio_args *args)
5496 {
5497
5498 if (!nfs4_error_stateid_expired(task->tk_status) ||
5499 nfs4_stateid_is_current(&args->stateid,
5500 args->context,
5501 args->lock_context,
5502 FMODE_WRITE))
5503 return false;
5504 rpc_restart_call_prepare(task);
5505 return true;
5506 }
5507
5508 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5509 {
5510 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5511 return -EAGAIN;
5512 if (nfs4_write_stateid_changed(task, &hdr->args))
5513 return -EAGAIN;
5514 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5515 nfs4_write_done_cb(task, hdr);
5516 }
5517
5518 static
5519 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5520 {
5521 /* Don't request attributes for pNFS or O_DIRECT writes */
5522 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5523 return false;
5524 /* Otherwise, request attributes if and only if we don't hold
5525 * a delegation
5526 */
5527 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5528 }
5529
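/*
 * Build a GETATTR bitmask from @src plus any attributes that are
 * currently marked invalid in the inode's cache, clamped to the
 * attributes the server actually supports.
 */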
5530 static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
5531 struct inode *inode, struct nfs_server *server,
5532 struct nfs4_label *label)
5533 {
5534 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
5535 unsigned int i;
5536
5537 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
5538
5539 if (cache_validity & NFS_INO_INVALID_CHANGE)
5540 bitmask[0] |= FATTR4_WORD0_CHANGE;
5541 if (cache_validity & NFS_INO_INVALID_ATIME)
5542 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5543 if (cache_validity & NFS_INO_INVALID_MODE)
5544 bitmask[1] |= FATTR4_WORD1_MODE;
5545 if (cache_validity & NFS_INO_INVALID_OTHER)
5546 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
5547 if (cache_validity & NFS_INO_INVALID_NLINK)
5548 bitmask[1] |= FATTR4_WORD1_NUMLINKS;
5549 if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
5550 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
5551 if (cache_validity & NFS_INO_INVALID_CTIME)
5552 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5553 if (cache_validity & NFS_INO_INVALID_MTIME)
5554 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5555 if (cache_validity & NFS_INO_INVALID_BLOCKS)
5556 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5557
5558 if (cache_validity & NFS_INO_INVALID_SIZE)
5559 bitmask[0] |= FATTR4_WORD0_SIZE;
5560
5561 for (i = 0; i < NFS4_BITMASK_SZ; i++)
5562 bitmask[i] &= server->attr_bitmask[i];
5563 }
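/*
 * In other words: each invalid-attribute flag cached on the inode is
 * translated into the corresponding FATTR4 bit (word 0 for change/size,
 * word 1 for times/owner/mode/nlink/space, word 2 for the security label),
 * and the result is clamped to what the server advertised in attr_bitmask.
 */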
5564
5565 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5566 struct rpc_message *msg,
5567 struct rpc_clnt **clnt)
5568 {
5569 struct nfs_server *server = NFS_SERVER(hdr->inode);
5570
5571 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5572 hdr->args.bitmask = NULL;
5573 hdr->res.fattr = NULL;
5574 } else {
5575 nfs4_bitmask_set(hdr->args.bitmask_store,
5576 server->cache_consistency_bitmask,
5577 hdr->inode, server, NULL);
5578 hdr->args.bitmask = hdr->args.bitmask_store;
5579 }
5580
5581 if (!hdr->pgio_done_cb)
5582 hdr->pgio_done_cb = nfs4_write_done_cb;
5583 hdr->res.server = server;
5584 hdr->timestamp = jiffies;
5585
5586 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5587 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5588 nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
5589 }
5590
5591 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5592 {
5593 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5594 &data->args.seq_args,
5595 &data->res.seq_res,
5596 task);
5597 }
5598
5599 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5600 {
5601 struct inode *inode = data->inode;
5602
5603 trace_nfs4_commit(data, task->tk_status);
5604 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5605 NULL, NULL) == -EAGAIN) {
5606 rpc_restart_call_prepare(task);
5607 return -EAGAIN;
5608 }
5609 return 0;
5610 }
5611
5612 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5613 {
5614 if (!nfs4_sequence_done(task, &data->res.seq_res))
5615 return -EAGAIN;
5616 return data->commit_done_cb(task, data);
5617 }
5618
5619 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5620 struct rpc_clnt **clnt)
5621 {
5622 struct nfs_server *server = NFS_SERVER(data->inode);
5623
5624 if (data->commit_done_cb == NULL)
5625 data->commit_done_cb = nfs4_commit_done_cb;
5626 data->res.server = server;
5627 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5628 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5629 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5630 }
5631
5632 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5633 struct nfs_commitres *res)
5634 {
5635 struct inode *dst_inode = file_inode(dst);
5636 struct nfs_server *server = NFS_SERVER(dst_inode);
5637 struct rpc_message msg = {
5638 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5639 .rpc_argp = args,
5640 .rpc_resp = res,
5641 };
5642
5643 args->fh = NFS_FH(dst_inode);
5644 return nfs4_call_sync(server->client, server, &msg,
5645 &args->seq_args, &res->seq_res, 1);
5646 }
5647
5648 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5649 {
5650 struct nfs_commitargs args = {
5651 .offset = offset,
5652 .count = count,
5653 };
5654 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5655 struct nfs4_exception exception = { };
5656 int status;
5657
5658 do {
5659 status = _nfs4_proc_commit(dst, &args, res);
5660 status = nfs4_handle_exception(dst_server, status, &exception);
5661 } while (exception.retry);
5662
5663 return status;
5664 }
5665
5666 struct nfs4_renewdata {
5667 struct nfs_client *client;
5668 unsigned long timestamp;
5669 };
5670
5671 /*
5672 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5673 * standalone procedure for queueing an asynchronous RENEW.
5674 */
5675 static void nfs4_renew_release(void *calldata)
5676 {
5677 struct nfs4_renewdata *data = calldata;
5678 struct nfs_client *clp = data->client;
5679
5680 if (refcount_read(&clp->cl_count) > 1)
5681 nfs4_schedule_state_renewal(clp);
5682 nfs_put_client(clp);
5683 kfree(data);
5684 }
5685
5686 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5687 {
5688 struct nfs4_renewdata *data = calldata;
5689 struct nfs_client *clp = data->client;
5690 unsigned long timestamp = data->timestamp;
5691
5692 trace_nfs4_renew_async(clp, task->tk_status);
5693 switch (task->tk_status) {
5694 case 0:
5695 break;
5696 case -NFS4ERR_LEASE_MOVED:
5697 nfs4_schedule_lease_moved_recovery(clp);
5698 break;
5699 default:
5700 /* Unless we're shutting down, schedule state recovery! */
5701 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5702 return;
5703 if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
5704 nfs4_schedule_lease_recovery(clp);
5705 return;
5706 }
5707 nfs4_schedule_path_down_recovery(clp);
5708 }
5709 do_renew_lease(clp, timestamp);
5710 }
5711
5712 static const struct rpc_call_ops nfs4_renew_ops = {
5713 .rpc_call_done = nfs4_renew_done,
5714 .rpc_release = nfs4_renew_release,
5715 };
5716
5717 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5718 {
5719 struct rpc_message msg = {
5720 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5721 .rpc_argp = clp,
5722 .rpc_cred = cred,
5723 };
5724 struct nfs4_renewdata *data;
5725
5726 if (renew_flags == 0)
5727 return 0;
5728 if (!refcount_inc_not_zero(&clp->cl_count))
5729 return -EIO;
5730 data = kmalloc(sizeof(*data), GFP_NOFS);
5731 if (data == NULL) {
5732 nfs_put_client(clp);
5733 return -ENOMEM;
5734 }
5735 data->client = clp;
5736 data->timestamp = jiffies;
5737 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5738 &nfs4_renew_ops, data);
5739 }
5740
5741 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5742 {
5743 struct rpc_message msg = {
5744 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5745 .rpc_argp = clp,
5746 .rpc_cred = cred,
5747 };
5748 unsigned long now = jiffies;
5749 int status;
5750
5751 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5752 if (status < 0)
5753 return status;
5754 do_renew_lease(clp, now);
5755 return 0;
5756 }
5757
5758 static inline int nfs4_server_supports_acls(struct nfs_server *server)
5759 {
5760 return server->caps & NFS_CAP_ACLS;
5761 }
5762
5763 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5764 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5765 * the stack.
5766 */
5767 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
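/*
 * For illustration: with the common values XATTR_SIZE_MAX = 65536 and
 * PAGE_SIZE = 4096 (an assumption; both depend on the architecture and
 * configuration), NFS4ACL_MAXPAGES works out to DIV_ROUND_UP(65536, 4096)
 * = 16, i.e. at most 16 page pointers on the stack in __nfs4_proc_set_acl().
 */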
5768
5769 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5770 struct page **pages)
5771 {
5772 struct page *newpage, **spages;
5773 int rc = 0;
5774 size_t len;
5775 spages = pages;
5776
5777 do {
5778 len = min_t(size_t, PAGE_SIZE, buflen);
5779 newpage = alloc_page(GFP_KERNEL);
5780
5781 if (newpage == NULL)
5782 goto unwind;
5783 memcpy(page_address(newpage), buf, len);
5784 buf += len;
5785 buflen -= len;
5786 *pages++ = newpage;
5787 rc++;
5788 } while (buflen != 0);
5789
5790 return rc;
5791
5792 unwind:
5793 for (; rc > 0; rc--)
5794 __free_page(spages[rc-1]);
5795 return -ENOMEM;
5796 }
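/*
 * Worked example (assuming PAGE_SIZE = 4096): a 5000-byte ACL buffer is
 * copied into two freshly allocated pages (4096 + 904 bytes) and
 * nfs4_buf_to_pages_noslab() returns 2; on allocation failure the pages
 * copied so far are freed and -ENOMEM is returned instead.
 */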
5797
5798 struct nfs4_cached_acl {
5799 int cached;
5800 size_t len;
5801 char data[];
5802 };
5803
5804 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5805 {
5806 struct nfs_inode *nfsi = NFS_I(inode);
5807
5808 spin_lock(&inode->i_lock);
5809 kfree(nfsi->nfs4_acl);
5810 nfsi->nfs4_acl = acl;
5811 spin_unlock(&inode->i_lock);
5812 }
5813
5814 static void nfs4_zap_acl_attr(struct inode *inode)
5815 {
5816 nfs4_set_cached_acl(inode, NULL);
5817 }
5818
5819 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5820 {
5821 struct nfs_inode *nfsi = NFS_I(inode);
5822 struct nfs4_cached_acl *acl;
5823 int ret = -ENOENT;
5824
5825 spin_lock(&inode->i_lock);
5826 acl = nfsi->nfs4_acl;
5827 if (acl == NULL)
5828 goto out;
5829 if (buf == NULL) /* user is just asking for length */
5830 goto out_len;
5831 if (acl->cached == 0)
5832 goto out;
5833 ret = -ERANGE; /* see getxattr(2) man page */
5834 if (acl->len > buflen)
5835 goto out;
5836 memcpy(buf, acl->data, acl->len);
5837 out_len:
5838 ret = acl->len;
5839 out:
5840 spin_unlock(&inode->i_lock);
5841 return ret;
5842 }
5843
5844 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5845 {
5846 struct nfs4_cached_acl *acl;
5847 size_t buflen = sizeof(*acl) + acl_len;
5848
5849 if (buflen <= PAGE_SIZE) {
5850 acl = kmalloc(buflen, GFP_KERNEL);
5851 if (acl == NULL)
5852 goto out;
5853 acl->cached = 1;
5854 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5855 } else {
5856 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5857 if (acl == NULL)
5858 goto out;
5859 acl->cached = 0;
5860 }
5861 acl->len = acl_len;
5862 out:
5863 nfs4_set_cached_acl(inode, acl);
5864 }
5865
5866 /*
5867 * The getxattr API returns the required buffer length when called with a
5868 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5869 * the required buf. On a NULL buf, we ask the server for a page of data,
5870 * guessing that the ACL can be serviced by a single page. If so, we cache
5871 * up to a page of ACL data, and the second call to getxattr is serviced
5872 * from the cache. If not, we throw away the page and cache only the
5873 * required length. The next getxattr call then makes another round trip to
5874 * the server, this time with an input buf of the required size.
5875 */
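/*
 * A typical caller therefore looks roughly like the following userspace
 * sketch (illustrative only, not part of this file's interfaces):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);   // probe length
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);         // fetch data
 *
 * The first call reaches nfs4_proc_get_acl() with buf == NULL and primes
 * the cache described above.
 */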
5876 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5877 {
5878 struct page **pages;
5879 struct nfs_getaclargs args = {
5880 .fh = NFS_FH(inode),
5881 .acl_len = buflen,
5882 };
5883 struct nfs_getaclres res = {
5884 .acl_len = buflen,
5885 };
5886 struct rpc_message msg = {
5887 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5888 .rpc_argp = &args,
5889 .rpc_resp = &res,
5890 };
5891 unsigned int npages;
5892 int ret = -ENOMEM, i;
5893 struct nfs_server *server = NFS_SERVER(inode);
5894
5895 if (buflen == 0)
5896 buflen = server->rsize;
5897
5898 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5899 pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS);
5900 if (!pages)
5901 return -ENOMEM;
5902
5903 args.acl_pages = pages;
5904
5905 for (i = 0; i < npages; i++) {
5906 pages[i] = alloc_page(GFP_KERNEL);
5907 if (!pages[i])
5908 goto out_free;
5909 }
5910
5911 /* for decoding across pages */
5912 res.acl_scratch = alloc_page(GFP_KERNEL);
5913 if (!res.acl_scratch)
5914 goto out_free;
5915
5916 args.acl_len = npages * PAGE_SIZE;
5917
5918 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5919 __func__, buf, buflen, npages, args.acl_len);
5920 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5921 &msg, &args.seq_args, &res.seq_res, 0);
5922 if (ret)
5923 goto out_free;
5924
5925 /* Handle the case where the passed-in buffer is too short */
5926 if (res.acl_flags & NFS4_ACL_TRUNC) {
5927 /* Did the user only issue a request for the acl length? */
5928 if (buf == NULL)
5929 goto out_ok;
5930 ret = -ERANGE;
5931 goto out_free;
5932 }
5933 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5934 if (buf) {
5935 if (res.acl_len > buflen) {
5936 ret = -ERANGE;
5937 goto out_free;
5938 }
5939 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5940 }
5941 out_ok:
5942 ret = res.acl_len;
5943 out_free:
5944 for (i = 0; i < npages; i++)
5945 if (pages[i])
5946 __free_page(pages[i]);
5947 if (res.acl_scratch)
5948 __free_page(res.acl_scratch);
5949 kfree(pages);
5950 return ret;
5951 }
5952
5953 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5954 {
5955 struct nfs4_exception exception = {
5956 .interruptible = true,
5957 };
5958 ssize_t ret;
5959 do {
5960 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5961 trace_nfs4_get_acl(inode, ret);
5962 if (ret >= 0)
5963 break;
5964 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5965 } while (exception.retry);
5966 return ret;
5967 }
5968
5969 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5970 {
5971 struct nfs_server *server = NFS_SERVER(inode);
5972 int ret;
5973
5974 if (!nfs4_server_supports_acls(server))
5975 return -EOPNOTSUPP;
5976 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
5977 if (ret < 0)
5978 return ret;
5979 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
5980 nfs_zap_acl_cache(inode);
5981 ret = nfs4_read_cached_acl(inode, buf, buflen);
5982 if (ret != -ENOENT)
5983 /* -ENOENT is returned if there is no ACL or if there is an ACL
5984 * but no cached acl data, just the acl length */
5985 return ret;
5986 return nfs4_get_acl_uncached(inode, buf, buflen);
5987 }
5988
5989 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5990 {
5991 struct nfs_server *server = NFS_SERVER(inode);
5992 struct page *pages[NFS4ACL_MAXPAGES];
5993 struct nfs_setaclargs arg = {
5994 .fh = NFS_FH(inode),
5995 .acl_pages = pages,
5996 .acl_len = buflen,
5997 };
5998 struct nfs_setaclres res;
5999 struct rpc_message msg = {
6000 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
6001 .rpc_argp = &arg,
6002 .rpc_resp = &res,
6003 };
6004 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
6005 int ret, i;
6006
6007 /* You can't remove system.nfs4_acl: */
6008 if (buflen == 0)
6009 return -EINVAL;
6010 if (!nfs4_server_supports_acls(server))
6011 return -EOPNOTSUPP;
6012 if (npages > ARRAY_SIZE(pages))
6013 return -ERANGE;
6014 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
6015 if (i < 0)
6016 return i;
6017 nfs4_inode_make_writeable(inode);
6018 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6019
6020 /*
6021 * Free each page after tx, so the only ref left is
6022 * held by the network stack
6023 */
6024 for (; i > 0; i--)
6025 put_page(pages[i-1]);
6026
6027 /*
6028 * An ACL update can result in an inode attribute update,
6029 * so mark the attribute cache invalid.
6030 */
6031 spin_lock(&inode->i_lock);
6032 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
6033 NFS_INO_INVALID_CTIME |
6034 NFS_INO_REVAL_FORCED);
6035 spin_unlock(&inode->i_lock);
6036 nfs_access_zap_cache(inode);
6037 nfs_zap_acl_cache(inode);
6038 return ret;
6039 }
6040
6041 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
6042 {
6043 struct nfs4_exception exception = { };
6044 int err;
6045 do {
6046 err = __nfs4_proc_set_acl(inode, buf, buflen);
6047 trace_nfs4_set_acl(inode, err);
6048 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
6049 /*
6050 * no need to retry since the kernel
6051 * isn't involved in encoding the ACEs.
6052 */
6053 err = -EINVAL;
6054 break;
6055 }
6056 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6057 &exception);
6058 } while (exception.retry);
6059 return err;
6060 }
6061
6062 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6063 static int _nfs4_get_security_label(struct inode *inode, void *buf,
6064 size_t buflen)
6065 {
6066 struct nfs_server *server = NFS_SERVER(inode);
6067 struct nfs_fattr fattr;
6068 struct nfs4_label label = {0, 0, buflen, buf};
6069
6070 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6071 struct nfs4_getattr_arg arg = {
6072 .fh = NFS_FH(inode),
6073 .bitmask = bitmask,
6074 };
6075 struct nfs4_getattr_res res = {
6076 .fattr = &fattr,
6077 .label = &label,
6078 .server = server,
6079 };
6080 struct rpc_message msg = {
6081 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
6082 .rpc_argp = &arg,
6083 .rpc_resp = &res,
6084 };
6085 int ret;
6086
6087 nfs_fattr_init(&fattr);
6088
6089 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
6090 if (ret)
6091 return ret;
6092 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
6093 return -ENOENT;
6094 return label.len;
6095 }
6096
6097 static int nfs4_get_security_label(struct inode *inode, void *buf,
6098 size_t buflen)
6099 {
6100 struct nfs4_exception exception = {
6101 .interruptible = true,
6102 };
6103 int err;
6104
6105 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6106 return -EOPNOTSUPP;
6107
6108 do {
6109 err = _nfs4_get_security_label(inode, buf, buflen);
6110 trace_nfs4_get_security_label(inode, err);
6111 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6112 &exception);
6113 } while (exception.retry);
6114 return err;
6115 }
6116
6117 static int _nfs4_do_set_security_label(struct inode *inode,
6118 struct nfs4_label *ilabel,
6119 struct nfs_fattr *fattr,
6120 struct nfs4_label *olabel)
6121 {
6122
6123 struct iattr sattr = {0};
6124 struct nfs_server *server = NFS_SERVER(inode);
6125 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6126 struct nfs_setattrargs arg = {
6127 .fh = NFS_FH(inode),
6128 .iap = &sattr,
6129 .server = server,
6130 .bitmask = bitmask,
6131 .label = ilabel,
6132 };
6133 struct nfs_setattrres res = {
6134 .fattr = fattr,
6135 .label = olabel,
6136 .server = server,
6137 };
6138 struct rpc_message msg = {
6139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6140 .rpc_argp = &arg,
6141 .rpc_resp = &res,
6142 };
6143 int status;
6144
6145 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6146
6147 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6148 if (status)
6149 dprintk("%s failed: %d\n", __func__, status);
6150
6151 return status;
6152 }
6153
6154 static int nfs4_do_set_security_label(struct inode *inode,
6155 struct nfs4_label *ilabel,
6156 struct nfs_fattr *fattr,
6157 struct nfs4_label *olabel)
6158 {
6159 struct nfs4_exception exception = { };
6160 int err;
6161
6162 do {
6163 err = _nfs4_do_set_security_label(inode, ilabel,
6164 fattr, olabel);
6165 trace_nfs4_set_security_label(inode, err);
6166 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6167 &exception);
6168 } while (exception.retry);
6169 return err;
6170 }
6171
6172 static int
6173 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6174 {
6175 struct nfs4_label ilabel, *olabel = NULL;
6176 struct nfs_fattr fattr;
6177 int status;
6178
6179 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6180 return -EOPNOTSUPP;
6181
6182 nfs_fattr_init(&fattr);
6183
6184 ilabel.pi = 0;
6185 ilabel.lfs = 0;
6186 ilabel.label = (char *)buf;
6187 ilabel.len = buflen;
6188
6189 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
6190 if (IS_ERR(olabel)) {
6191 status = PTR_ERR(olabel);
6192 goto out;
6193 }
6194
6195 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
6196 if (status == 0)
6197 nfs_setsecurity(inode, &fattr, olabel);
6198
6199 nfs4_label_free(olabel);
6200 out:
6201 return status;
6202 }
6203 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
6204
6205
6206 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6207 nfs4_verifier *bootverf)
6208 {
6209 __be32 verf[2];
6210
6211 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6212 /* An impossible timestamp guarantees this value
6213 * will never match a generated boot time. */
6214 verf[0] = cpu_to_be32(U32_MAX);
6215 verf[1] = cpu_to_be32(U32_MAX);
6216 } else {
6217 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6218 u64 ns = ktime_to_ns(nn->boot_time);
6219
6220 verf[0] = cpu_to_be32(ns >> 32);
6221 verf[1] = cpu_to_be32(ns);
6222 }
6223 memcpy(bootverf->data, verf, sizeof(bootverf->data));
6224 }
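/*
 * Illustrative layout (assuming a recorded boot time of exactly 1000
 * seconds after the epoch, i.e. ns = 1000000000000): verf[0] holds the
 * upper 32 bits, cpu_to_be32(232), and verf[1] the lower 32 bits,
 * cpu_to_be32(3567587328), so the 8-byte verifier changes whenever the
 * recorded boot time changes.
 */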
6225
6226 static size_t
6227 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6228 {
6229 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6230 struct nfs_netns_client *nn_clp = nn->nfs_client;
6231 const char *id;
6232
6233 buf[0] = '\0';
6234
6235 if (nn_clp) {
6236 rcu_read_lock();
6237 id = rcu_dereference(nn_clp->identifier);
6238 if (id)
6239 strscpy(buf, id, buflen);
6240 rcu_read_unlock();
6241 }
6242
6243 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6244 strscpy(buf, nfs4_client_id_uniquifier, buflen);
6245
6246 return strlen(buf);
6247 }
6248
6249 static int
6250 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6251 {
6252 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6253 size_t buflen;
6254 size_t len;
6255 char *str;
6256
6257 if (clp->cl_owner_id != NULL)
6258 return 0;
6259
6260 rcu_read_lock();
6261 len = 14 +
6262 strlen(clp->cl_rpcclient->cl_nodename) +
6263 1 +
6264 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6265 1;
6266 rcu_read_unlock();
6267
6268 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6269 if (buflen)
6270 len += buflen + 1;
6271
6272 if (len > NFS4_OPAQUE_LIMIT + 1)
6273 return -EINVAL;
6274
6275 /*
6276 * Since this string is allocated at mount time, and held until the
6277 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6278 * about a memory-reclaim deadlock.
6279 */
6280 str = kmalloc(len, GFP_KERNEL);
6281 if (!str)
6282 return -ENOMEM;
6283
6284 rcu_read_lock();
6285 if (buflen)
6286 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6287 clp->cl_rpcclient->cl_nodename, buf,
6288 rpc_peeraddr2str(clp->cl_rpcclient,
6289 RPC_DISPLAY_ADDR));
6290 else
6291 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6292 clp->cl_rpcclient->cl_nodename,
6293 rpc_peeraddr2str(clp->cl_rpcclient,
6294 RPC_DISPLAY_ADDR));
6295 rcu_read_unlock();
6296
6297 clp->cl_owner_id = str;
6298 return 0;
6299 }
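/*
 * For example (hypothetical host and mount): with nodename "client1",
 * server address "192.0.2.7" and no uniquifier configured, the resulting
 * string is "Linux NFSv4.0 client1/192.0.2.7"; the constant 14 in the
 * length estimate above covers the "Linux NFSv4.0 " prefix.
 */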
6300
6301 static int
6302 nfs4_init_uniform_client_string(struct nfs_client *clp)
6303 {
6304 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6305 size_t buflen;
6306 size_t len;
6307 char *str;
6308
6309 if (clp->cl_owner_id != NULL)
6310 return 0;
6311
6312 len = 10 + 10 + 1 + 10 + 1 +
6313 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6314
6315 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6316 if (buflen)
6317 len += buflen + 1;
6318
6319 if (len > NFS4_OPAQUE_LIMIT + 1)
6320 return -EINVAL;
6321
6322 /*
6323 * Since this string is allocated at mount time, and held until the
6324 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6325 * about a memory-reclaim deadlock.
6326 */
6327 str = kmalloc(len, GFP_KERNEL);
6328 if (!str)
6329 return -ENOMEM;
6330
6331 if (buflen)
6332 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6333 clp->rpc_ops->version, clp->cl_minorversion,
6334 buf, clp->cl_rpcclient->cl_nodename);
6335 else
6336 scnprintf(str, len, "Linux NFSv%u.%u %s",
6337 clp->rpc_ops->version, clp->cl_minorversion,
6338 clp->cl_rpcclient->cl_nodename);
6339 clp->cl_owner_id = str;
6340 return 0;
6341 }
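/*
 * For the uniform case the "10 + 10 + 1 + 10 + 1" length estimate above
 * budgets for the fixed "Linux NFSv" prefix, two %u values of up to ten
 * digits each, and the '.' and ' ' separators; e.g. a 4.1 mount on a
 * (hypothetical) host "client1" yields "Linux NFSv4.1 client1" when no
 * uniquifier is set.
 */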
6342
6343 /*
6344 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6345 * services. Advertise one based on the address family of the
6346 * clientaddr.
6347 */
6348 static unsigned int
6349 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6350 {
6351 if (strchr(clp->cl_ipaddr, ':') != NULL)
6352 return scnprintf(buf, len, "tcp6");
6353 else
6354 return scnprintf(buf, len, "tcp");
6355 }
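/*
 * Example: a clientaddr of "203.0.113.5" contains no ':' and selects
 * "tcp", while an IPv6 clientaddr such as "2001:db8::1" selects "tcp6"
 * (both addresses are illustrative).
 */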
6356
6357 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6358 {
6359 struct nfs4_setclientid *sc = calldata;
6360
6361 if (task->tk_status == 0)
6362 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6363 }
6364
6365 static const struct rpc_call_ops nfs4_setclientid_ops = {
6366 .rpc_call_done = nfs4_setclientid_done,
6367 };
6368
6369 /**
6370 * nfs4_proc_setclientid - Negotiate client ID
6371 * @clp: state data structure
6372 * @program: RPC program for NFSv4 callback service
6373 * @port: IP port number for NFS4 callback service
6374 * @cred: credential to use for this call
6375 * @res: where to place the result
6376 *
6377 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6378 */
6379 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6380 unsigned short port, const struct cred *cred,
6381 struct nfs4_setclientid_res *res)
6382 {
6383 nfs4_verifier sc_verifier;
6384 struct nfs4_setclientid setclientid = {
6385 .sc_verifier = &sc_verifier,
6386 .sc_prog = program,
6387 .sc_clnt = clp,
6388 };
6389 struct rpc_message msg = {
6390 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6391 .rpc_argp = &setclientid,
6392 .rpc_resp = res,
6393 .rpc_cred = cred,
6394 };
6395 struct rpc_task_setup task_setup_data = {
6396 .rpc_client = clp->cl_rpcclient,
6397 .rpc_message = &msg,
6398 .callback_ops = &nfs4_setclientid_ops,
6399 .callback_data = &setclientid,
6400 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6401 };
6402 unsigned long now = jiffies;
6403 int status;
6404
6405 /* nfs_client_id4 */
6406 nfs4_init_boot_verifier(clp, &sc_verifier);
6407
6408 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6409 status = nfs4_init_uniform_client_string(clp);
6410 else
6411 status = nfs4_init_nonuniform_client_string(clp);
6412
6413 if (status)
6414 goto out;
6415
6416 /* cb_client4 */
6417 setclientid.sc_netid_len =
6418 nfs4_init_callback_netid(clp,
6419 setclientid.sc_netid,
6420 sizeof(setclientid.sc_netid));
6421 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6422 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6423 clp->cl_ipaddr, port >> 8, port & 255);
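/*
 * The callback "universal address" appends the port in dotted
 * high-byte/low-byte form; e.g. (illustrative values) a clientaddr of
 * "192.0.2.7" with callback port 45678 encodes as "192.0.2.7.178.110",
 * since 45678 >> 8 == 178 and 45678 & 255 == 110.
 */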
6424
6425 dprintk("NFS call setclientid auth=%s, '%s'\n",
6426 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6427 clp->cl_owner_id);
6428
6429 status = nfs4_call_sync_custom(&task_setup_data);
6430 if (setclientid.sc_cred) {
6431 kfree(clp->cl_acceptor);
6432 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6433 put_rpccred(setclientid.sc_cred);
6434 }
6435
6436 if (status == 0)
6437 do_renew_lease(clp, now);
6438 out:
6439 trace_nfs4_setclientid(clp, status);
6440 dprintk("NFS reply setclientid: %d\n", status);
6441 return status;
6442 }
6443
6444 /**
6445 * nfs4_proc_setclientid_confirm - Confirm client ID
6446 * @clp: state data structure
6447 * @arg: result of a previous SETCLIENTID
6448 * @cred: credential to use for this call
6449 *
6450 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6451 */
6452 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6453 struct nfs4_setclientid_res *arg,
6454 const struct cred *cred)
6455 {
6456 struct rpc_message msg = {
6457 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6458 .rpc_argp = arg,
6459 .rpc_cred = cred,
6460 };
6461 int status;
6462
6463 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6464 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6465 clp->cl_clientid);
6466 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6467 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6468 trace_nfs4_setclientid_confirm(clp, status);
6469 dprintk("NFS reply setclientid_confirm: %d\n", status);
6470 return status;
6471 }
6472
6473 struct nfs4_delegreturndata {
6474 struct nfs4_delegreturnargs args;
6475 struct nfs4_delegreturnres res;
6476 struct nfs_fh fh;
6477 nfs4_stateid stateid;
6478 unsigned long timestamp;
6479 struct {
6480 struct nfs4_layoutreturn_args arg;
6481 struct nfs4_layoutreturn_res res;
6482 struct nfs4_xdr_opaque_data ld_private;
6483 u32 roc_barrier;
6484 bool roc;
6485 } lr;
6486 struct nfs_fattr fattr;
6487 int rpc_status;
6488 struct inode *inode;
6489 };
6490
6491 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6492 {
6493 struct nfs4_delegreturndata *data = calldata;
6494 struct nfs4_exception exception = {
6495 .inode = data->inode,
6496 .stateid = &data->stateid,
6497 .task_is_privileged = data->args.seq_args.sa_privileged,
6498 };
6499
6500 if (!nfs4_sequence_done(task, &data->res.seq_res))
6501 return;
6502
6503 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6504
6505 /* Handle Layoutreturn errors */
6506 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6507 &data->res.lr_ret) == -EAGAIN)
6508 goto out_restart;
6509
6510 switch (task->tk_status) {
6511 case 0:
6512 renew_lease(data->res.server, data->timestamp);
6513 break;
6514 case -NFS4ERR_ADMIN_REVOKED:
6515 case -NFS4ERR_DELEG_REVOKED:
6516 case -NFS4ERR_EXPIRED:
6517 nfs4_free_revoked_stateid(data->res.server,
6518 data->args.stateid,
6519 task->tk_msg.rpc_cred);
6520 fallthrough;
6521 case -NFS4ERR_BAD_STATEID:
6522 case -NFS4ERR_STALE_STATEID:
6523 case -ETIMEDOUT:
6524 task->tk_status = 0;
6525 break;
6526 case -NFS4ERR_OLD_STATEID:
6527 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6528 nfs4_stateid_seqid_inc(&data->stateid);
6529 if (data->args.bitmask) {
6530 data->args.bitmask = NULL;
6531 data->res.fattr = NULL;
6532 }
6533 goto out_restart;
6534 case -NFS4ERR_ACCESS:
6535 if (data->args.bitmask) {
6536 data->args.bitmask = NULL;
6537 data->res.fattr = NULL;
6538 goto out_restart;
6539 }
6540 fallthrough;
6541 default:
6542 task->tk_status = nfs4_async_handle_exception(task,
6543 data->res.server, task->tk_status,
6544 &exception);
6545 if (exception.retry)
6546 goto out_restart;
6547 }
6548 nfs_delegation_mark_returned(data->inode, data->args.stateid);
6549 data->rpc_status = task->tk_status;
6550 return;
6551 out_restart:
6552 task->tk_status = 0;
6553 rpc_restart_call_prepare(task);
6554 }
6555
6556 static void nfs4_delegreturn_release(void *calldata)
6557 {
6558 struct nfs4_delegreturndata *data = calldata;
6559 struct inode *inode = data->inode;
6560
6561 if (data->lr.roc)
6562 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6563 data->res.lr_ret);
6564 if (inode) {
6565 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
6566 nfs_iput_and_deactive(inode);
6567 }
6568 kfree(calldata);
6569 }
6570
6571 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6572 {
6573 struct nfs4_delegreturndata *d_data;
6574 struct pnfs_layout_hdr *lo;
6575
6576 d_data = (struct nfs4_delegreturndata *)data;
6577
6578 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6579 nfs4_sequence_done(task, &d_data->res.seq_res);
6580 return;
6581 }
6582
6583 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6584 if (lo && !pnfs_layout_is_valid(lo)) {
6585 d_data->args.lr_args = NULL;
6586 d_data->res.lr_res = NULL;
6587 }
6588
6589 nfs4_setup_sequence(d_data->res.server->nfs_client,
6590 &d_data->args.seq_args,
6591 &d_data->res.seq_res,
6592 task);
6593 }
6594
6595 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6596 .rpc_call_prepare = nfs4_delegreturn_prepare,
6597 .rpc_call_done = nfs4_delegreturn_done,
6598 .rpc_release = nfs4_delegreturn_release,
6599 };
6600
6601 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6602 {
6603 struct nfs4_delegreturndata *data;
6604 struct nfs_server *server = NFS_SERVER(inode);
6605 struct rpc_task *task;
6606 struct rpc_message msg = {
6607 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6608 .rpc_cred = cred,
6609 };
6610 struct rpc_task_setup task_setup_data = {
6611 .rpc_client = server->client,
6612 .rpc_message = &msg,
6613 .callback_ops = &nfs4_delegreturn_ops,
6614 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
6615 };
6616 int status = 0;
6617
6618 data = kzalloc(sizeof(*data), GFP_NOFS);
6619 if (data == NULL)
6620 return -ENOMEM;
6621
6622 nfs4_state_protect(server->nfs_client,
6623 NFS_SP4_MACH_CRED_CLEANUP,
6624 &task_setup_data.rpc_client, &msg);
6625
6626 data->args.fhandle = &data->fh;
6627 data->args.stateid = &data->stateid;
6628 nfs4_bitmask_set(data->args.bitmask_store,
6629 server->cache_consistency_bitmask, inode, server,
6630 NULL);
6631 data->args.bitmask = data->args.bitmask_store;
6632 nfs_copy_fh(&data->fh, NFS_FH(inode));
6633 nfs4_stateid_copy(&data->stateid, stateid);
6634 data->res.fattr = &data->fattr;
6635 data->res.server = server;
6636 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6637 data->lr.arg.ld_private = &data->lr.ld_private;
6638 nfs_fattr_init(data->res.fattr);
6639 data->timestamp = jiffies;
6640 data->rpc_status = 0;
6641 data->inode = nfs_igrab_and_active(inode);
6642 if (data->inode || issync) {
6643 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6644 cred);
6645 if (data->lr.roc) {
6646 data->args.lr_args = &data->lr.arg;
6647 data->res.lr_res = &data->lr.res;
6648 }
6649 }
6650
6651 if (!data->inode)
6652 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6653 1);
6654 else
6655 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6656 0);
6657 task_setup_data.callback_data = data;
6658 msg.rpc_argp = &data->args;
6659 msg.rpc_resp = &data->res;
6660 task = rpc_run_task(&task_setup_data);
6661 if (IS_ERR(task))
6662 return PTR_ERR(task);
6663 if (!issync)
6664 goto out;
6665 status = rpc_wait_for_completion_task(task);
6666 if (status != 0)
6667 goto out;
6668 status = data->rpc_status;
6669 out:
6670 rpc_put_task(task);
6671 return status;
6672 }
6673
6674 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6675 {
6676 struct nfs_server *server = NFS_SERVER(inode);
6677 struct nfs4_exception exception = { };
6678 int err;
6679 do {
6680 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6681 trace_nfs4_delegreturn(inode, stateid, err);
6682 switch (err) {
6683 case -NFS4ERR_STALE_STATEID:
6684 case -NFS4ERR_EXPIRED:
6685 case 0:
6686 return 0;
6687 }
6688 err = nfs4_handle_exception(server, err, &exception);
6689 } while (exception.retry);
6690 return err;
6691 }
6692
6693 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6694 {
6695 struct inode *inode = state->inode;
6696 struct nfs_server *server = NFS_SERVER(inode);
6697 struct nfs_client *clp = server->nfs_client;
6698 struct nfs_lockt_args arg = {
6699 .fh = NFS_FH(inode),
6700 .fl = request,
6701 };
6702 struct nfs_lockt_res res = {
6703 .denied = request,
6704 };
6705 struct rpc_message msg = {
6706 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6707 .rpc_argp = &arg,
6708 .rpc_resp = &res,
6709 .rpc_cred = state->owner->so_cred,
6710 };
6711 struct nfs4_lock_state *lsp;
6712 int status;
6713
6714 arg.lock_owner.clientid = clp->cl_clientid;
6715 status = nfs4_set_lock_state(state, request);
6716 if (status != 0)
6717 goto out;
6718 lsp = request->fl_u.nfs4_fl.owner;
6719 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6720 arg.lock_owner.s_dev = server->s_dev;
6721 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6722 switch (status) {
6723 case 0:
6724 request->fl_type = F_UNLCK;
6725 break;
6726 case -NFS4ERR_DENIED:
6727 status = 0;
6728 }
6729 request->fl_ops->fl_release_private(request);
6730 request->fl_ops = NULL;
6731 out:
6732 return status;
6733 }
6734
6735 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6736 {
6737 struct nfs4_exception exception = {
6738 .interruptible = true,
6739 };
6740 int err;
6741
6742 do {
6743 err = _nfs4_proc_getlk(state, cmd, request);
6744 trace_nfs4_get_lock(request, state, cmd, err);
6745 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6746 &exception);
6747 } while (exception.retry);
6748 return err;
6749 }
6750
6751 /*
6752 * Update the seqid of a lock stateid after receiving
6753 * NFS4ERR_OLD_STATEID
6754 */
6755 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6756 struct nfs4_lock_state *lsp)
6757 {
6758 struct nfs4_state *state = lsp->ls_state;
6759 bool ret = false;
6760
6761 spin_lock(&state->state_lock);
6762 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6763 goto out;
6764 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6765 nfs4_stateid_seqid_inc(dst);
6766 else
6767 dst->seqid = lsp->ls_stateid.seqid;
6768 ret = true;
6769 out:
6770 spin_unlock(&state->state_lock);
6771 return ret;
6772 }
6773
6774 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6775 struct nfs4_lock_state *lsp)
6776 {
6777 struct nfs4_state *state = lsp->ls_state;
6778 bool ret;
6779
6780 spin_lock(&state->state_lock);
6781 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6782 nfs4_stateid_copy(dst, &lsp->ls_stateid);
6783 spin_unlock(&state->state_lock);
6784 return ret;
6785 }
6786
6787 struct nfs4_unlockdata {
6788 struct nfs_locku_args arg;
6789 struct nfs_locku_res res;
6790 struct nfs4_lock_state *lsp;
6791 struct nfs_open_context *ctx;
6792 struct nfs_lock_context *l_ctx;
6793 struct file_lock fl;
6794 struct nfs_server *server;
6795 unsigned long timestamp;
6796 };
6797
6798 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6799 struct nfs_open_context *ctx,
6800 struct nfs4_lock_state *lsp,
6801 struct nfs_seqid *seqid)
6802 {
6803 struct nfs4_unlockdata *p;
6804 struct nfs4_state *state = lsp->ls_state;
6805 struct inode *inode = state->inode;
6806
6807 p = kzalloc(sizeof(*p), GFP_NOFS);
6808 if (p == NULL)
6809 return NULL;
6810 p->arg.fh = NFS_FH(inode);
6811 p->arg.fl = &p->fl;
6812 p->arg.seqid = seqid;
6813 p->res.seqid = seqid;
6814 p->lsp = lsp;
6815 /* Ensure we don't close file until we're done freeing locks! */
6816 p->ctx = get_nfs_open_context(ctx);
6817 p->l_ctx = nfs_get_lock_context(ctx);
6818 locks_init_lock(&p->fl);
6819 locks_copy_lock(&p->fl, fl);
6820 p->server = NFS_SERVER(inode);
6821 spin_lock(&state->state_lock);
6822 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6823 spin_unlock(&state->state_lock);
6824 return p;
6825 }
6826
6827 static void nfs4_locku_release_calldata(void *data)
6828 {
6829 struct nfs4_unlockdata *calldata = data;
6830 nfs_free_seqid(calldata->arg.seqid);
6831 nfs4_put_lock_state(calldata->lsp);
6832 nfs_put_lock_context(calldata->l_ctx);
6833 put_nfs_open_context(calldata->ctx);
6834 kfree(calldata);
6835 }
6836
6837 static void nfs4_locku_done(struct rpc_task *task, void *data)
6838 {
6839 struct nfs4_unlockdata *calldata = data;
6840 struct nfs4_exception exception = {
6841 .inode = calldata->lsp->ls_state->inode,
6842 .stateid = &calldata->arg.stateid,
6843 };
6844
6845 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6846 return;
6847 switch (task->tk_status) {
6848 case 0:
6849 renew_lease(calldata->server, calldata->timestamp);
6850 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6851 if (nfs4_update_lock_stateid(calldata->lsp,
6852 &calldata->res.stateid))
6853 break;
6854 fallthrough;
6855 case -NFS4ERR_ADMIN_REVOKED:
6856 case -NFS4ERR_EXPIRED:
6857 nfs4_free_revoked_stateid(calldata->server,
6858 &calldata->arg.stateid,
6859 task->tk_msg.rpc_cred);
6860 fallthrough;
6861 case -NFS4ERR_BAD_STATEID:
6862 case -NFS4ERR_STALE_STATEID:
6863 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6864 calldata->lsp))
6865 rpc_restart_call_prepare(task);
6866 break;
6867 case -NFS4ERR_OLD_STATEID:
6868 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6869 calldata->lsp))
6870 rpc_restart_call_prepare(task);
6871 break;
6872 default:
6873 task->tk_status = nfs4_async_handle_exception(task,
6874 calldata->server, task->tk_status,
6875 &exception);
6876 if (exception.retry)
6877 rpc_restart_call_prepare(task);
6878 }
6879 nfs_release_seqid(calldata->arg.seqid);
6880 }
6881
6882 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6883 {
6884 struct nfs4_unlockdata *calldata = data;
6885
6886 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6887 nfs_async_iocounter_wait(task, calldata->l_ctx))
6888 return;
6889
6890 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6891 goto out_wait;
6892 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6893 /* Note: exit _without_ running nfs4_locku_done */
6894 goto out_no_action;
6895 }
6896 calldata->timestamp = jiffies;
6897 if (nfs4_setup_sequence(calldata->server->nfs_client,
6898 &calldata->arg.seq_args,
6899 &calldata->res.seq_res,
6900 task) != 0)
6901 nfs_release_seqid(calldata->arg.seqid);
6902 return;
6903 out_no_action:
6904 task->tk_action = NULL;
6905 out_wait:
6906 nfs4_sequence_done(task, &calldata->res.seq_res);
6907 }
6908
6909 static const struct rpc_call_ops nfs4_locku_ops = {
6910 .rpc_call_prepare = nfs4_locku_prepare,
6911 .rpc_call_done = nfs4_locku_done,
6912 .rpc_release = nfs4_locku_release_calldata,
6913 };
6914
6915 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6916 struct nfs_open_context *ctx,
6917 struct nfs4_lock_state *lsp,
6918 struct nfs_seqid *seqid)
6919 {
6920 struct nfs4_unlockdata *data;
6921 struct rpc_message msg = {
6922 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6923 .rpc_cred = ctx->cred,
6924 };
6925 struct rpc_task_setup task_setup_data = {
6926 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6927 .rpc_message = &msg,
6928 .callback_ops = &nfs4_locku_ops,
6929 .workqueue = nfsiod_workqueue,
6930 .flags = RPC_TASK_ASYNC,
6931 };
6932 struct nfs_client *client =
6933 NFS_SERVER(lsp->ls_state->inode)->nfs_client;
6934
6935 if (client->cl_minorversion)
6936 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6937
6938 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6939 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6940
6941 /* Ensure this is an unlock - when canceling a lock, the
6942 * canceled lock is passed in, and it won't be an unlock.
6943 */
6944 fl->fl_type = F_UNLCK;
6945 if (fl->fl_flags & FL_CLOSE)
6946 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6947
6948 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6949 if (data == NULL) {
6950 nfs_free_seqid(seqid);
6951 return ERR_PTR(-ENOMEM);
6952 }
6953
6954 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
6955 msg.rpc_argp = &data->arg;
6956 msg.rpc_resp = &data->res;
6957 task_setup_data.callback_data = data;
6958 return rpc_run_task(&task_setup_data);
6959 }
6960
6961 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6962 {
6963 struct inode *inode = state->inode;
6964 struct nfs4_state_owner *sp = state->owner;
6965 struct nfs_inode *nfsi = NFS_I(inode);
6966 struct nfs_seqid *seqid;
6967 struct nfs4_lock_state *lsp;
6968 struct rpc_task *task;
6969 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6970 int status = 0;
6971 unsigned char fl_flags = request->fl_flags;
6972
6973 status = nfs4_set_lock_state(state, request);
6974 /* Unlock _before_ we do the RPC call */
6975 request->fl_flags |= FL_EXISTS;
6976 /* Exclude nfs_delegation_claim_locks() */
6977 mutex_lock(&sp->so_delegreturn_mutex);
6978 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
6979 down_read(&nfsi->rwsem);
6980 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
6981 up_read(&nfsi->rwsem);
6982 mutex_unlock(&sp->so_delegreturn_mutex);
6983 goto out;
6984 }
6985 up_read(&nfsi->rwsem);
6986 mutex_unlock(&sp->so_delegreturn_mutex);
6987 if (status != 0)
6988 goto out;
6989 /* Is this a delegated lock? */
6990 lsp = request->fl_u.nfs4_fl.owner;
6991 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
6992 goto out;
6993 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
6994 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
6995 status = -ENOMEM;
6996 if (IS_ERR(seqid))
6997 goto out;
6998 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
6999 status = PTR_ERR(task);
7000 if (IS_ERR(task))
7001 goto out;
7002 status = rpc_wait_for_completion_task(task);
7003 rpc_put_task(task);
7004 out:
7005 request->fl_flags = fl_flags;
7006 trace_nfs4_unlock(request, state, F_SETLK, status);
7007 return status;
7008 }
7009
7010 struct nfs4_lockdata {
7011 struct nfs_lock_args arg;
7012 struct nfs_lock_res res;
7013 struct nfs4_lock_state *lsp;
7014 struct nfs_open_context *ctx;
7015 struct file_lock fl;
7016 unsigned long timestamp;
7017 int rpc_status;
7018 int cancelled;
7019 struct nfs_server *server;
7020 };
7021
7022 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
7023 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
7024 gfp_t gfp_mask)
7025 {
7026 struct nfs4_lockdata *p;
7027 struct inode *inode = lsp->ls_state->inode;
7028 struct nfs_server *server = NFS_SERVER(inode);
7029 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7030
7031 p = kzalloc(sizeof(*p), gfp_mask);
7032 if (p == NULL)
7033 return NULL;
7034
7035 p->arg.fh = NFS_FH(inode);
7036 p->arg.fl = &p->fl;
7037 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
7038 if (IS_ERR(p->arg.open_seqid))
7039 goto out_free;
7040 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
7041 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
7042 if (IS_ERR(p->arg.lock_seqid))
7043 goto out_free_seqid;
7044 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
7045 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
7046 p->arg.lock_owner.s_dev = server->s_dev;
7047 p->res.lock_seqid = p->arg.lock_seqid;
7048 p->lsp = lsp;
7049 p->server = server;
7050 p->ctx = get_nfs_open_context(ctx);
7051 locks_init_lock(&p->fl);
7052 locks_copy_lock(&p->fl, fl);
7053 return p;
7054 out_free_seqid:
7055 nfs_free_seqid(p->arg.open_seqid);
7056 out_free:
7057 kfree(p);
7058 return NULL;
7059 }
7060
7061 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
7062 {
7063 struct nfs4_lockdata *data = calldata;
7064 struct nfs4_state *state = data->lsp->ls_state;
7065
7066 dprintk("%s: begin!\n", __func__);
7067 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
7068 goto out_wait;
7069 /* Do we need to do an open_to_lock_owner? */
7070 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
7071 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
7072 goto out_release_lock_seqid;
7073 }
7074 nfs4_stateid_copy(&data->arg.open_stateid,
7075 &state->open_stateid);
7076 data->arg.new_lock_owner = 1;
7077 data->res.open_seqid = data->arg.open_seqid;
7078 } else {
7079 data->arg.new_lock_owner = 0;
7080 nfs4_stateid_copy(&data->arg.lock_stateid,
7081 &data->lsp->ls_stateid);
7082 }
7083 if (!nfs4_valid_open_stateid(state)) {
7084 data->rpc_status = -EBADF;
7085 task->tk_action = NULL;
7086 goto out_release_open_seqid;
7087 }
7088 data->timestamp = jiffies;
7089 if (nfs4_setup_sequence(data->server->nfs_client,
7090 &data->arg.seq_args,
7091 &data->res.seq_res,
7092 task) == 0)
7093 return;
7094 out_release_open_seqid:
7095 nfs_release_seqid(data->arg.open_seqid);
7096 out_release_lock_seqid:
7097 nfs_release_seqid(data->arg.lock_seqid);
7098 out_wait:
7099 nfs4_sequence_done(task, &data->res.seq_res);
7100 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
7101 }
7102
7103 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
7104 {
7105 struct nfs4_lockdata *data = calldata;
7106 struct nfs4_lock_state *lsp = data->lsp;
7107
7108 dprintk("%s: begin!\n", __func__);
7109
7110 if (!nfs4_sequence_done(task, &data->res.seq_res))
7111 return;
7112
7113 data->rpc_status = task->tk_status;
7114 switch (task->tk_status) {
7115 case 0:
7116 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
7117 data->timestamp);
7118 if (data->arg.new_lock && !data->cancelled) {
7119 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
7120 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
7121 goto out_restart;
7122 }
7123 if (data->arg.new_lock_owner != 0) {
7124 nfs_confirm_seqid(&lsp->ls_seqid, 0);
7125 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7126 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7127 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7128 goto out_restart;
7129 break;
7130 case -NFS4ERR_BAD_STATEID:
7131 case -NFS4ERR_OLD_STATEID:
7132 case -NFS4ERR_STALE_STATEID:
7133 case -NFS4ERR_EXPIRED:
7134 if (data->arg.new_lock_owner != 0) {
7135 if (!nfs4_stateid_match(&data->arg.open_stateid,
7136 &lsp->ls_state->open_stateid))
7137 goto out_restart;
7138 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7139 &lsp->ls_stateid))
7140 goto out_restart;
7141 }
7142 out_done:
7143 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
7144 return;
7145 out_restart:
7146 if (!data->cancelled)
7147 rpc_restart_call_prepare(task);
7148 goto out_done;
7149 }
7150
7151 static void nfs4_lock_release(void *calldata)
7152 {
7153 struct nfs4_lockdata *data = calldata;
7154
7155 dprintk("%s: begin!\n", __func__);
7156 nfs_free_seqid(data->arg.open_seqid);
7157 if (data->cancelled && data->rpc_status == 0) {
7158 struct rpc_task *task;
7159 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7160 data->arg.lock_seqid);
7161 if (!IS_ERR(task))
7162 rpc_put_task_async(task);
7163 dprintk("%s: cancelling lock!\n", __func__);
7164 } else
7165 nfs_free_seqid(data->arg.lock_seqid);
7166 nfs4_put_lock_state(data->lsp);
7167 put_nfs_open_context(data->ctx);
7168 kfree(data);
7169 dprintk("%s: done!\n", __func__);
7170 }
7171
7172 static const struct rpc_call_ops nfs4_lock_ops = {
7173 .rpc_call_prepare = nfs4_lock_prepare,
7174 .rpc_call_done = nfs4_lock_done,
7175 .rpc_release = nfs4_lock_release,
7176 };
7177
7178 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7179 {
7180 switch (error) {
7181 case -NFS4ERR_ADMIN_REVOKED:
7182 case -NFS4ERR_EXPIRED:
7183 case -NFS4ERR_BAD_STATEID:
7184 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7185 if (new_lock_owner != 0 ||
7186 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7187 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7188 break;
7189 case -NFS4ERR_STALE_STATEID:
7190 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7191 nfs4_schedule_lease_recovery(server->nfs_client);
7192 }
7193 }
7194
7195 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7196 {
7197 struct nfs4_lockdata *data;
7198 struct rpc_task *task;
7199 struct rpc_message msg = {
7200 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7201 .rpc_cred = state->owner->so_cred,
7202 };
7203 struct rpc_task_setup task_setup_data = {
7204 .rpc_client = NFS_CLIENT(state->inode),
7205 .rpc_message = &msg,
7206 .callback_ops = &nfs4_lock_ops,
7207 .workqueue = nfsiod_workqueue,
7208 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7209 };
7210 int ret;
7211 struct nfs_client *client = NFS_SERVER(state->inode)->nfs_client;
7212
7213 if (client->cl_minorversion)
7214 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7215
7216 dprintk("%s: begin!\n", __func__);
7217 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
7218 fl->fl_u.nfs4_fl.owner,
7219 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
7220 if (data == NULL)
7221 return -ENOMEM;
7222 if (IS_SETLKW(cmd))
7223 data->arg.block = 1;
7224 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7225 recovery_type > NFS_LOCK_NEW);
7226 msg.rpc_argp = &data->arg;
7227 msg.rpc_resp = &data->res;
7228 task_setup_data.callback_data = data;
7229 if (recovery_type > NFS_LOCK_NEW) {
7230 if (recovery_type == NFS_LOCK_RECLAIM)
7231 data->arg.reclaim = NFS_LOCK_RECLAIM;
7232 } else
7233 data->arg.new_lock = 1;
7234 task = rpc_run_task(&task_setup_data);
7235 if (IS_ERR(task))
7236 return PTR_ERR(task);
7237 ret = rpc_wait_for_completion_task(task);
7238 if (ret == 0) {
7239 ret = data->rpc_status;
7240 if (ret)
7241 nfs4_handle_setlk_error(data->server, data->lsp,
7242 data->arg.new_lock_owner, ret);
7243 } else
7244 data->cancelled = true;
7245 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7246 rpc_put_task(task);
7247 dprintk("%s: done, ret = %d!\n", __func__, ret);
7248 return ret;
7249 }
7250
7251 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7252 {
7253 struct nfs_server *server = NFS_SERVER(state->inode);
7254 struct nfs4_exception exception = {
7255 .inode = state->inode,
7256 };
7257 int err;
7258
7259 do {
7260 /* Cache the lock if possible... */
7261 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7262 return 0;
7263 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7264 if (err != -NFS4ERR_DELAY)
7265 break;
7266 nfs4_handle_exception(server, err, &exception);
7267 } while (exception.retry);
7268 return err;
7269 }
7270
7271 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7272 {
7273 struct nfs_server *server = NFS_SERVER(state->inode);
7274 struct nfs4_exception exception = {
7275 .inode = state->inode,
7276 };
7277 int err;
7278
7279 err = nfs4_set_lock_state(state, request);
7280 if (err != 0)
7281 return err;
7282 if (!recover_lost_locks) {
7283 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7284 return 0;
7285 }
7286 do {
7287 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7288 return 0;
7289 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7290 switch (err) {
7291 default:
7292 goto out;
7293 case -NFS4ERR_GRACE:
7294 case -NFS4ERR_DELAY:
7295 nfs4_handle_exception(server, err, &exception);
7296 err = 0;
7297 }
7298 } while (exception.retry);
7299 out:
7300 return err;
7301 }
7302
7303 #if defined(CONFIG_NFS_V4_1)
7304 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7305 {
7306 struct nfs4_lock_state *lsp;
7307 int status;
7308
7309 status = nfs4_set_lock_state(state, request);
7310 if (status != 0)
7311 return status;
7312 lsp = request->fl_u.nfs4_fl.owner;
7313 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7314 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7315 return 0;
7316 return nfs4_lock_expired(state, request);
7317 }
7318 #endif
7319
7320 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7321 {
7322 struct nfs_inode *nfsi = NFS_I(state->inode);
7323 struct nfs4_state_owner *sp = state->owner;
7324 unsigned char fl_flags = request->fl_flags;
7325 int status;
7326
7327 request->fl_flags |= FL_ACCESS;
7328 status = locks_lock_inode_wait(state->inode, request);
7329 if (status < 0)
7330 goto out;
7331 mutex_lock(&sp->so_delegreturn_mutex);
7332 down_read(&nfsi->rwsem);
7333 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7334 /* Yes: cache locks! */
7335 /* ...but avoid races with delegation recall... */
7336 request->fl_flags = fl_flags & ~FL_SLEEP;
7337 status = locks_lock_inode_wait(state->inode, request);
7338 up_read(&nfsi->rwsem);
7339 mutex_unlock(&sp->so_delegreturn_mutex);
7340 goto out;
7341 }
7342 up_read(&nfsi->rwsem);
7343 mutex_unlock(&sp->so_delegreturn_mutex);
7344 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7345 out:
7346 request->fl_flags = fl_flags;
7347 return status;
7348 }
7349
7350 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7351 {
7352 struct nfs4_exception exception = {
7353 .state = state,
7354 .inode = state->inode,
7355 .interruptible = true,
7356 };
7357 int err;
7358
7359 do {
7360 err = _nfs4_proc_setlk(state, cmd, request);
7361 if (err == -NFS4ERR_DENIED)
7362 err = -EAGAIN;
7363 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7364 err, &exception);
7365 } while (exception.retry);
7366 return err;
7367 }
7368
7369 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7370 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7371
7372 static int
7373 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7374 struct file_lock *request)
7375 {
7376 int status = -ERESTARTSYS;
7377 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7378
7379 while (!signalled()) {
7380 status = nfs4_proc_setlk(state, cmd, request);
7381 if ((status != -EAGAIN) || IS_SETLK(cmd))
7382 break;
7383 freezable_schedule_timeout_interruptible(timeout);
7384 timeout *= 2;
7385 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7386 status = -ERESTARTSYS;
7387 }
7388 return status;
7389 }
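/*
 * The polling interval above doubles on each -EAGAIN, so a blocked
 * F_SETLKW request retries after roughly 1s, 2s, 4s, 8s, 16s and then
 * every 30s (NFS4_LOCK_MINTIMEOUT up to NFS4_LOCK_MAXTIMEOUT), until the
 * lock is granted or the task is signalled.
 */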
7390
7391 #ifdef CONFIG_NFS_V4_1
7392 struct nfs4_lock_waiter {
7393 struct inode *inode;
7394 struct nfs_lowner owner;
7395 wait_queue_entry_t wait;
7396 };
7397
7398 static int
7399 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7400 {
7401 struct nfs4_lock_waiter *waiter =
7402 container_of(wait, struct nfs4_lock_waiter, wait);
7403
7404 /* NULL key means to wake up everyone */
7405 if (key) {
7406 struct cb_notify_lock_args *cbnl = key;
7407 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7408 *wowner = &waiter->owner;
7409
7410 /* Only wake if the callback was for the same owner. */
7411 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7412 return 0;
7413
7414 /* Make sure it's for the right inode */
7415 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7416 return 0;
7417 }
7418
7419 return woken_wake_function(wait, mode, flags, key);
7420 }
7421
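/*
 * Acquire a lock on the server. A non-blocking (SETLK) request is tried only
 * once; a blocking (SETLKW) request that is denied is retried. If the server
 * may send CB_NOTIFY_LOCK, wait on the client's lock waitqueue for the
 * callback (or an NFS4_LOCK_MAXTIMEOUT timeout); otherwise fall back to
 * polling with exponential backoff in nfs4_retry_setlk_simple().
 */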
7422 static int
7423 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7424 {
7425 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7426 struct nfs_server *server = NFS_SERVER(state->inode);
7427 struct nfs_client *clp = server->nfs_client;
7428 wait_queue_head_t *q = &clp->cl_lock_waitq;
7429 struct nfs4_lock_waiter waiter = {
7430 .inode = state->inode,
7431 .owner = { .clientid = clp->cl_clientid,
7432 .id = lsp->ls_seqid.owner_id,
7433 .s_dev = server->s_dev },
7434 };
7435 int status;
7436
7437 /* Don't bother with waitqueue if we don't expect a callback */
7438 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7439 return nfs4_retry_setlk_simple(state, cmd, request);
7440
7441 init_wait(&waiter.wait);
7442 waiter.wait.func = nfs4_wake_lock_waiter;
7443 add_wait_queue(q, &waiter.wait);
7444
7445 do {
7446 status = nfs4_proc_setlk(state, cmd, request);
7447 if (status != -EAGAIN || IS_SETLK(cmd))
7448 break;
7449
7450 status = -ERESTARTSYS;
7451 freezer_do_not_count();
7452 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE,
7453 NFS4_LOCK_MAXTIMEOUT);
7454 freezer_count();
7455 } while (!signalled());
7456
7457 remove_wait_queue(q, &waiter.wait);
7458
7459 return status;
7460 }
7461 #else /* !CONFIG_NFS_V4_1 */
7462 static inline int
7463 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7464 {
7465 return nfs4_retry_setlk_simple(state, cmd, request);
7466 }
7467 #endif
7468
7469 static int
7470 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7471 {
7472 struct nfs_open_context *ctx;
7473 struct nfs4_state *state;
7474 int status;
7475
7476 /* verify open state */
7477 ctx = nfs_file_open_context(filp);
7478 state = ctx->state;
7479
7480 if (IS_GETLK(cmd)) {
7481 if (state != NULL)
7482 return nfs4_proc_getlk(state, F_GETLK, request);
7483 return 0;
7484 }
7485
7486 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7487 return -EINVAL;
7488
7489 if (request->fl_type == F_UNLCK) {
7490 if (state != NULL)
7491 return nfs4_proc_unlck(state, cmd, request);
7492 return 0;
7493 }
7494
7495 if (state == NULL)
7496 return -ENOLCK;
7497
7498 if ((request->fl_flags & FL_POSIX) &&
7499 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7500 return -ENOLCK;
7501
7502 /*
7503 * Don't rely on the VFS having checked the file open mode,
7504 * since it won't do this for flock() locks.
7505 */
7506 switch (request->fl_type) {
7507 case F_RDLCK:
7508 if (!(filp->f_mode & FMODE_READ))
7509 return -EBADF;
7510 break;
7511 case F_WRLCK:
7512 if (!(filp->f_mode & FMODE_WRITE))
7513 return -EBADF;
7514 }
7515
7516 status = nfs4_set_lock_state(state, request);
7517 if (status != 0)
7518 return status;
7519
7520 return nfs4_retry_setlk(state, cmd, request);
7521 }
7522
7523 static int nfs4_delete_lease(struct file *file, void **priv)
7524 {
7525 return generic_setlease(file, F_UNLCK, NULL, priv);
7526 }
7527
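/*
 * Grant a local lease only while we hold a delegation of the matching type,
 * and re-check after generic_setlease() to close the race with a concurrent
 * delegation return.
 */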
7528 static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
7529 void **priv)
7530 {
7531 struct inode *inode = file_inode(file);
7532 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
7533 int ret;
7534
7535 /* No delegation, no lease */
7536 if (!nfs4_have_delegation(inode, type))
7537 return -EAGAIN;
7538 ret = generic_setlease(file, arg, lease, priv);
7539 if (ret || nfs4_have_delegation(inode, type))
7540 return ret;
7541 /* We raced with a delegation return */
7542 nfs4_delete_lease(file, priv);
7543 return -EAGAIN;
7544 }
7545
7546 int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
7547 void **priv)
7548 {
7549 switch (arg) {
7550 case F_RDLCK:
7551 case F_WRLCK:
7552 return nfs4_add_lease(file, arg, lease, priv);
7553 case F_UNLCK:
7554 return nfs4_delete_lease(file, priv);
7555 default:
7556 return -EINVAL;
7557 }
7558 }
7559
7560 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7561 {
7562 struct nfs_server *server = NFS_SERVER(state->inode);
7563 int err;
7564
7565 err = nfs4_set_lock_state(state, fl);
7566 if (err != 0)
7567 return err;
7568 do {
7569 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7570 if (err != -NFS4ERR_DELAY)
7571 break;
7572 ssleep(1);
7573 } while (err == -NFS4ERR_DELAY);
7574 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7575 }
7576
7577 struct nfs_release_lockowner_data {
7578 struct nfs4_lock_state *lsp;
7579 struct nfs_server *server;
7580 struct nfs_release_lockowner_args args;
7581 struct nfs_release_lockowner_res res;
7582 unsigned long timestamp;
7583 };
7584
7585 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7586 {
7587 struct nfs_release_lockowner_data *data = calldata;
7588 struct nfs_server *server = data->server;
7589 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7590 &data->res.seq_res, task);
7591 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7592 data->timestamp = jiffies;
7593 }
7594
7595 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7596 {
7597 struct nfs_release_lockowner_data *data = calldata;
7598 struct nfs_server *server = data->server;
7599
7600 nfs40_sequence_done(task, &data->res.seq_res);
7601
7602 switch (task->tk_status) {
7603 case 0:
7604 renew_lease(server, data->timestamp);
7605 break;
7606 case -NFS4ERR_STALE_CLIENTID:
7607 case -NFS4ERR_EXPIRED:
7608 nfs4_schedule_lease_recovery(server->nfs_client);
7609 break;
7610 case -NFS4ERR_LEASE_MOVED:
7611 case -NFS4ERR_DELAY:
7612 if (nfs4_async_handle_error(task, server,
7613 NULL, NULL) == -EAGAIN)
7614 rpc_restart_call_prepare(task);
7615 }
7616 }
7617
7618 static void nfs4_release_lockowner_release(void *calldata)
7619 {
7620 struct nfs_release_lockowner_data *data = calldata;
7621 nfs4_free_lock_state(data->server, data->lsp);
7622 kfree(calldata);
7623 }
7624
7625 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7626 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7627 .rpc_call_done = nfs4_release_lockowner_done,
7628 .rpc_release = nfs4_release_lockowner_release,
7629 };
7630
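/*
 * Asynchronously send a RELEASE_LOCKOWNER operation and free the lock state
 * once the RPC completes. Only applicable to NFSv4.0; later minor versions
 * return immediately.
 */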
7631 static void
7632 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7633 {
7634 struct nfs_release_lockowner_data *data;
7635 struct rpc_message msg = {
7636 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7637 };
7638
7639 if (server->nfs_client->cl_mvops->minor_version != 0)
7640 return;
7641
7642 data = kmalloc(sizeof(*data), GFP_NOFS);
7643 if (!data)
7644 return;
7645 data->lsp = lsp;
7646 data->server = server;
7647 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7648 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7649 data->args.lock_owner.s_dev = server->s_dev;
7650
7651 msg.rpc_argp = &data->args;
7652 msg.rpc_resp = &data->res;
7653 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7654 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7655 }
7656
7657 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7658
7659 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7660 struct user_namespace *mnt_userns,
7661 struct dentry *unused, struct inode *inode,
7662 const char *key, const void *buf,
7663 size_t buflen, int flags)
7664 {
7665 return nfs4_proc_set_acl(inode, buf, buflen);
7666 }
7667
7668 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7669 struct dentry *unused, struct inode *inode,
7670 const char *key, void *buf, size_t buflen)
7671 {
7672 return nfs4_proc_get_acl(inode, buf, buflen);
7673 }
7674
7675 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7676 {
7677 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
7678 }
7679
7680 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7681
7682 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7683 struct user_namespace *mnt_userns,
7684 struct dentry *unused, struct inode *inode,
7685 const char *key, const void *buf,
7686 size_t buflen, int flags)
7687 {
7688 if (security_ismaclabel(key))
7689 return nfs4_set_security_label(inode, buf, buflen);
7690
7691 return -EOPNOTSUPP;
7692 }
7693
7694 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7695 struct dentry *unused, struct inode *inode,
7696 const char *key, void *buf, size_t buflen)
7697 {
7698 if (security_ismaclabel(key))
7699 return nfs4_get_security_label(inode, buf, buflen);
7700 return -EOPNOTSUPP;
7701 }
7702
7703 static ssize_t
7704 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7705 {
7706 int len = 0;
7707
7708 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7709 len = security_inode_listsecurity(inode, list, list_len);
7710 if (len >= 0 && list_len && len > list_len)
7711 return -ERANGE;
7712 }
7713 return len;
7714 }
7715
7716 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7717 .prefix = XATTR_SECURITY_PREFIX,
7718 .get = nfs4_xattr_get_nfs4_label,
7719 .set = nfs4_xattr_set_nfs4_label,
7720 };
7721
7722 #else
7723
7724 static ssize_t
7725 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7726 {
7727 return 0;
7728 }
7729
7730 #endif
7731
7732 #ifdef CONFIG_NFS_V4_2
7733 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
7734 struct user_namespace *mnt_userns,
7735 struct dentry *unused, struct inode *inode,
7736 const char *key, const void *buf,
7737 size_t buflen, int flags)
7738 {
7739 u32 mask;
7740 int ret;
7741
7742 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7743 return -EOPNOTSUPP;
7744
7745 /*
7746 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7747 * flags right now. Handling of xattr operations uses the normal
7748 * file read/write permissions.
7749 *
7750 * Just in case the server has other ideas (which RFC 8276 allows),
7751 * do a cached access check for the XA* flags to possibly avoid
7752 * doing an RPC and getting EACCES back.
7753 */
7754 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7755 if (!(mask & NFS_ACCESS_XAWRITE))
7756 return -EACCES;
7757 }
7758
7759 if (buf == NULL) {
7760 ret = nfs42_proc_removexattr(inode, key);
7761 if (!ret)
7762 nfs4_xattr_cache_remove(inode, key);
7763 } else {
7764 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
7765 if (!ret)
7766 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
7767 }
7768
7769 return ret;
7770 }
7771
7772 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
7773 struct dentry *unused, struct inode *inode,
7774 const char *key, void *buf, size_t buflen)
7775 {
7776 u32 mask;
7777 ssize_t ret;
7778
7779 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7780 return -EOPNOTSUPP;
7781
7782 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7783 if (!(mask & NFS_ACCESS_XAREAD))
7784 return -EACCES;
7785 }
7786
7787 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7788 if (ret)
7789 return ret;
7790
7791 ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
7792 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7793 return ret;
7794
7795 ret = nfs42_proc_getxattr(inode, key, buf, buflen);
7796
7797 return ret;
7798 }
7799
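/*
 * List the user extended attributes. Answer from the xattr cache when
 * possible; otherwise page through LISTXATTRS requests, following the cookie
 * until the server reports EOF, and prime the cache with the result.
 */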
7800 static ssize_t
7801 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7802 {
7803 u64 cookie;
7804 bool eof;
7805 ssize_t ret, size;
7806 char *buf;
7807 size_t buflen;
7808 u32 mask;
7809
7810 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7811 return 0;
7812
7813 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7814 if (!(mask & NFS_ACCESS_XALIST))
7815 return 0;
7816 }
7817
7818 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7819 if (ret)
7820 return ret;
7821
7822 ret = nfs4_xattr_cache_list(inode, list, list_len);
7823 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7824 return ret;
7825
7826 cookie = 0;
7827 eof = false;
7828 buflen = list_len ? list_len : XATTR_LIST_MAX;
7829 buf = list_len ? list : NULL;
7830 size = 0;
7831
7832 while (!eof) {
7833 ret = nfs42_proc_listxattrs(inode, buf, buflen,
7834 &cookie, &eof);
7835 if (ret < 0)
7836 return ret;
7837
7838 if (list_len) {
7839 buf += ret;
7840 buflen -= ret;
7841 }
7842 size += ret;
7843 }
7844
7845 if (list_len)
7846 nfs4_xattr_cache_set_list(inode, list, size);
7847
7848 return size;
7849 }
7850
7851 #else
7852
7853 static ssize_t
7854 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7855 {
7856 return 0;
7857 }
7858 #endif /* CONFIG_NFS_V4_2 */
7859
7860 /*
7861 * nfs_fhget will use either the mounted_on_fileid or the fileid
7862 */
7863 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7864 {
7865 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7866 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7867 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7868 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7869 return;
7870
7871 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7872 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7873 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7874 fattr->nlink = 2;
7875 }
7876
7877 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7878 const struct qstr *name,
7879 struct nfs4_fs_locations *fs_locations,
7880 struct page *page)
7881 {
7882 struct nfs_server *server = NFS_SERVER(dir);
7883 u32 bitmask[3];
7884 struct nfs4_fs_locations_arg args = {
7885 .dir_fh = NFS_FH(dir),
7886 .name = name,
7887 .page = page,
7888 .bitmask = bitmask,
7889 };
7890 struct nfs4_fs_locations_res res = {
7891 .fs_locations = fs_locations,
7892 };
7893 struct rpc_message msg = {
7894 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7895 .rpc_argp = &args,
7896 .rpc_resp = &res,
7897 };
7898 int status;
7899
7900 dprintk("%s: start\n", __func__);
7901
7902 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7903 bitmask[1] = nfs4_fattr_bitmap[1];
7904
7905 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
7906 * is not supported */
7907 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7908 bitmask[0] &= ~FATTR4_WORD0_FILEID;
7909 else
7910 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7911
7912 nfs_fattr_init(&fs_locations->fattr);
7913 fs_locations->server = server;
7914 fs_locations->nlocations = 0;
7915 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7916 dprintk("%s: returned status = %d\n", __func__, status);
7917 return status;
7918 }
7919
7920 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7921 const struct qstr *name,
7922 struct nfs4_fs_locations *fs_locations,
7923 struct page *page)
7924 {
7925 struct nfs4_exception exception = {
7926 .interruptible = true,
7927 };
7928 int err;
7929 do {
7930 err = _nfs4_proc_fs_locations(client, dir, name,
7931 fs_locations, page);
7932 trace_nfs4_get_fs_locations(dir, name, err);
7933 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7934 &exception);
7935 } while (exception.retry);
7936 return err;
7937 }
7938
7939 /*
7940 * This operation also signals the server that this client is
7941 * performing migration recovery. The server can stop returning
7942 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
7943 * appended to this compound to identify the client ID which is
7944 * performing recovery.
7945 */
7946 static int _nfs40_proc_get_locations(struct nfs_server *server,
7947 struct nfs_fh *fhandle,
7948 struct nfs4_fs_locations *locations,
7949 struct page *page, const struct cred *cred)
7950 {
7951 struct rpc_clnt *clnt = server->client;
7952 u32 bitmask[2] = {
7953 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7954 };
7955 struct nfs4_fs_locations_arg args = {
7956 .clientid = server->nfs_client->cl_clientid,
7957 .fh = fhandle,
7958 .page = page,
7959 .bitmask = bitmask,
7960 .migration = 1, /* skip LOOKUP */
7961 .renew = 1, /* append RENEW */
7962 };
7963 struct nfs4_fs_locations_res res = {
7964 .fs_locations = locations,
7965 .migration = 1,
7966 .renew = 1,
7967 };
7968 struct rpc_message msg = {
7969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7970 .rpc_argp = &args,
7971 .rpc_resp = &res,
7972 .rpc_cred = cred,
7973 };
7974 unsigned long now = jiffies;
7975 int status;
7976
7977 nfs_fattr_init(&locations->fattr);
7978 locations->server = server;
7979 locations->nlocations = 0;
7980
7981 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7982 status = nfs4_call_sync_sequence(clnt, server, &msg,
7983 &args.seq_args, &res.seq_res);
7984 if (status)
7985 return status;
7986
7987 renew_lease(server, now);
7988 return 0;
7989 }
7990
7991 #ifdef CONFIG_NFS_V4_1
7992
7993 /*
7994 * This operation also signals the server that this client is
7995 * performing migration recovery. The server can stop asserting
7996 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
7997 * performing this operation is identified in the SEQUENCE
7998 * operation in this compound.
7999 *
8000 * When the client supports GETATTR(fs_locations_info), it can
8001 * be plumbed in here.
8002 */
8003 static int _nfs41_proc_get_locations(struct nfs_server *server,
8004 struct nfs_fh *fhandle,
8005 struct nfs4_fs_locations *locations,
8006 struct page *page, const struct cred *cred)
8007 {
8008 struct rpc_clnt *clnt = server->client;
8009 u32 bitmask[2] = {
8010 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8011 };
8012 struct nfs4_fs_locations_arg args = {
8013 .fh = fhandle,
8014 .page = page,
8015 .bitmask = bitmask,
8016 .migration = 1, /* skip LOOKUP */
8017 };
8018 struct nfs4_fs_locations_res res = {
8019 .fs_locations = locations,
8020 .migration = 1,
8021 };
8022 struct rpc_message msg = {
8023 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8024 .rpc_argp = &args,
8025 .rpc_resp = &res,
8026 .rpc_cred = cred,
8027 };
8028 int status;
8029
8030 nfs_fattr_init(&locations->fattr);
8031 locations->server = server;
8032 locations->nlocations = 0;
8033
8034 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8035 status = nfs4_call_sync_sequence(clnt, server, &msg,
8036 &args.seq_args, &res.seq_res);
8037 if (status == NFS4_OK &&
8038 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8039 status = -NFS4ERR_LEASE_MOVED;
8040 return status;
8041 }
8042
8043 #endif /* CONFIG_NFS_V4_1 */
8044
8045 /**
8046 * nfs4_proc_get_locations - discover locations for a migrated FSID
8047 * @server: NFS server hosting the FSID that is migrating
 * @fhandle: filehandle on the migrating FSID
8048 * @locations: result of query
8049 * @page: buffer
8050 * @cred: credential to use for this operation
8051 *
8052 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
8053 * operation failed, or a negative errno if a local error occurred.
8054 *
8055 * On success, "locations" is filled in, but if the server has
8056 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
8057 * asserted.
8058 *
8059 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
8060 * from this client that require migration recovery.
8061 */
8062 int nfs4_proc_get_locations(struct nfs_server *server,
8063 struct nfs_fh *fhandle,
8064 struct nfs4_fs_locations *locations,
8065 struct page *page, const struct cred *cred)
8066 {
8067 struct nfs_client *clp = server->nfs_client;
8068 const struct nfs4_mig_recovery_ops *ops =
8069 clp->cl_mvops->mig_recovery_ops;
8070 struct nfs4_exception exception = {
8071 .interruptible = true,
8072 };
8073 int status;
8074
8075 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8076 (unsigned long long)server->fsid.major,
8077 (unsigned long long)server->fsid.minor,
8078 clp->cl_hostname);
8079 nfs_display_fhandle(fhandle, __func__);
8080
8081 do {
8082 status = ops->get_locations(server, fhandle, locations, page,
8083 cred);
8084 if (status != -NFS4ERR_DELAY)
8085 break;
8086 nfs4_handle_exception(server, status, &exception);
8087 } while (exception.retry);
8088 return status;
8089 }
8090
8091 /*
8092 * This operation also signals the server that this client is
8093 * performing "lease moved" recovery. The server can stop
8094 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
8095 * is appended to this compound to identify the client ID which is
8096 * performing recovery.
8097 */
8098 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
8099 {
8100 struct nfs_server *server = NFS_SERVER(inode);
8101 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
8102 struct rpc_clnt *clnt = server->client;
8103 struct nfs4_fsid_present_arg args = {
8104 .fh = NFS_FH(inode),
8105 .clientid = clp->cl_clientid,
8106 .renew = 1, /* append RENEW */
8107 };
8108 struct nfs4_fsid_present_res res = {
8109 .renew = 1,
8110 };
8111 struct rpc_message msg = {
8112 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8113 .rpc_argp = &args,
8114 .rpc_resp = &res,
8115 .rpc_cred = cred,
8116 };
8117 unsigned long now = jiffies;
8118 int status;
8119
8120 res.fh = nfs_alloc_fhandle();
8121 if (res.fh == NULL)
8122 return -ENOMEM;
8123
8124 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8125 status = nfs4_call_sync_sequence(clnt, server, &msg,
8126 &args.seq_args, &res.seq_res);
8127 nfs_free_fhandle(res.fh);
8128 if (status)
8129 return status;
8130
8131 do_renew_lease(clp, now);
8132 return 0;
8133 }
8134
8135 #ifdef CONFIG_NFS_V4_1
8136
8137 /*
8138 * This operation also signals the server that this client is
8139 * performing "lease moved" recovery. The server can stop asserting
8140 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
8141 * this operation is identified in the SEQUENCE operation in this
8142 * compound.
8143 */
8144 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
8145 {
8146 struct nfs_server *server = NFS_SERVER(inode);
8147 struct rpc_clnt *clnt = server->client;
8148 struct nfs4_fsid_present_arg args = {
8149 .fh = NFS_FH(inode),
8150 };
8151 struct nfs4_fsid_present_res res = {
8152 };
8153 struct rpc_message msg = {
8154 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8155 .rpc_argp = &args,
8156 .rpc_resp = &res,
8157 .rpc_cred = cred,
8158 };
8159 int status;
8160
8161 res.fh = nfs_alloc_fhandle();
8162 if (res.fh == NULL)
8163 return -ENOMEM;
8164
8165 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8166 status = nfs4_call_sync_sequence(clnt, server, &msg,
8167 &args.seq_args, &res.seq_res);
8168 nfs_free_fhandle(res.fh);
8169 if (status == NFS4_OK &&
8170 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8171 status = -NFS4ERR_LEASE_MOVED;
8172 return status;
8173 }
8174
8175 #endif /* CONFIG_NFS_V4_1 */
8176
8177 /**
8178 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8179 * @inode: inode on FSID to check
8180 * @cred: credential to use for this operation
8181 *
8182 * Server indicates whether the FSID is present, moved, or not
8183 * recognized. This operation is necessary to clear a LEASE_MOVED
8184 * condition for this client ID.
8185 *
8186 * Returns NFS4_OK if the FSID is present on this server,
8187 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8188 * NFS4ERR code if some error occurred on the server, or a
8189 * negative errno if a local failure occurred.
8190 */
8191 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8192 {
8193 struct nfs_server *server = NFS_SERVER(inode);
8194 struct nfs_client *clp = server->nfs_client;
8195 const struct nfs4_mig_recovery_ops *ops =
8196 clp->cl_mvops->mig_recovery_ops;
8197 struct nfs4_exception exception = {
8198 .interruptible = true,
8199 };
8200 int status;
8201
8202 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8203 (unsigned long long)server->fsid.major,
8204 (unsigned long long)server->fsid.minor,
8205 clp->cl_hostname);
8206 nfs_display_fhandle(NFS_FH(inode), __func__);
8207
8208 do {
8209 status = ops->fsid_present(inode, cred);
8210 if (status != -NFS4ERR_DELAY)
8211 break;
8212 nfs4_handle_exception(server, status, &exception);
8213 } while (exception.retry);
8214 return status;
8215 }
8216
8217 /*
8218 * If 'use_integrity' is true and the state management nfs_client
8219 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8220 * and the machine credential as per RFC3530bis and RFC5661 Security
8221 * Considerations sections. Otherwise, just use the user cred with the
8222 * filesystem's rpc_client.
8223 */
8224 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8225 {
8226 int status;
8227 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8228 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8229 struct nfs4_secinfo_arg args = {
8230 .dir_fh = NFS_FH(dir),
8231 .name = name,
8232 };
8233 struct nfs4_secinfo_res res = {
8234 .flavors = flavors,
8235 };
8236 struct rpc_message msg = {
8237 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8238 .rpc_argp = &args,
8239 .rpc_resp = &res,
8240 };
8241 struct nfs4_call_sync_data data = {
8242 .seq_server = NFS_SERVER(dir),
8243 .seq_args = &args.seq_args,
8244 .seq_res = &res.seq_res,
8245 };
8246 struct rpc_task_setup task_setup = {
8247 .rpc_client = clnt,
8248 .rpc_message = &msg,
8249 .callback_ops = clp->cl_mvops->call_sync_ops,
8250 .callback_data = &data,
8251 .flags = RPC_TASK_NO_ROUND_ROBIN,
8252 };
8253 const struct cred *cred = NULL;
8254
8255 if (use_integrity) {
8256 clnt = clp->cl_rpcclient;
8257 task_setup.rpc_client = clnt;
8258
8259 cred = nfs4_get_clid_cred(clp);
8260 msg.rpc_cred = cred;
8261 }
8262
8263 dprintk("NFS call secinfo %s\n", name->name);
8264
8265 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8266 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8267 status = nfs4_call_sync_custom(&task_setup);
8268
8269 dprintk("NFS reply secinfo: %d\n", status);
8270
8271 put_cred(cred);
8272 return status;
8273 }
8274
8275 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8276 struct nfs4_secinfo_flavors *flavors)
8277 {
8278 struct nfs4_exception exception = {
8279 .interruptible = true,
8280 };
8281 int err;
8282 do {
8283 err = -NFS4ERR_WRONGSEC;
8284
8285 /* try to use integrity protection with machine cred */
8286 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8287 err = _nfs4_proc_secinfo(dir, name, flavors, true);
8288
8289 /*
8290 * if unable to use integrity protection, or SECINFO with
8291 * integrity protection returns NFS4ERR_WRONGSEC (which is
8292 * disallowed by spec, but exists in deployed servers) use
8293 * the current filesystem's rpc_client and the user cred.
8294 */
8295 if (err == -NFS4ERR_WRONGSEC)
8296 err = _nfs4_proc_secinfo(dir, name, flavors, false);
8297
8298 trace_nfs4_secinfo(dir, name, err);
8299 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8300 &exception);
8301 } while (exception.retry);
8302 return err;
8303 }
8304
8305 #ifdef CONFIG_NFS_V4_1
8306 /*
8307 * Check the exchange flags returned by the server: fail if any invalid
8308 * flags are set, if both the PNFS and NON_PNFS flags are set, or if none
8309 * of the NON_PNFS, PNFS, or DS flags is set.
8310 */
8311 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8312 {
8313 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8314 goto out_inval;
8315 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8316 goto out_inval;
8317 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8318 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8319 goto out_inval;
8320 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8321 goto out_inval;
8322 return NFS_OK;
8323 out_inval:
8324 return -NFS4ERR_INVAL;
8325 }
8326
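/* Return true if two EXCHANGE_ID server_scope values are identical. */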
8327 static bool
8328 nfs41_same_server_scope(struct nfs41_server_scope *a,
8329 struct nfs41_server_scope *b)
8330 {
8331 if (a->server_scope_sz != b->server_scope_sz)
8332 return false;
8333 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8334 }
8335
8336 static void
8337 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8338 {
8339 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8340 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8341 struct nfs_client *clp = args->client;
8342
8343 switch (task->tk_status) {
8344 case -NFS4ERR_BADSESSION:
8345 case -NFS4ERR_DEADSESSION:
8346 nfs4_schedule_session_recovery(clp->cl_session,
8347 task->tk_status);
8348 return;
8349 }
8350 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8351 res->dir != NFS4_CDFS4_BOTH) {
8352 rpc_task_close_connection(task);
8353 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8354 rpc_restart_call(task);
8355 }
8356 }
8357
8358 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8359 .rpc_call_done = nfs4_bind_one_conn_to_session_done,
8360 };
8361
8362 /*
8363 * nfs4_proc_bind_one_conn_to_session()
8364 *
8365 * The 4.1 client currently uses the same TCP connection for the
8366 * fore and backchannel.
8367 */
8368 static
8369 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8370 struct rpc_xprt *xprt,
8371 struct nfs_client *clp,
8372 const struct cred *cred)
8373 {
8374 int status;
8375 struct nfs41_bind_conn_to_session_args args = {
8376 .client = clp,
8377 .dir = NFS4_CDFC4_FORE_OR_BOTH,
8378 .retries = 0,
8379 };
8380 struct nfs41_bind_conn_to_session_res res;
8381 struct rpc_message msg = {
8382 .rpc_proc =
8383 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8384 .rpc_argp = &args,
8385 .rpc_resp = &res,
8386 .rpc_cred = cred,
8387 };
8388 struct rpc_task_setup task_setup_data = {
8389 .rpc_client = clnt,
8390 .rpc_xprt = xprt,
8391 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
8392 .rpc_message = &msg,
8393 .flags = RPC_TASK_TIMEOUT,
8394 };
8395 struct rpc_task *task;
8396
8397 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8398 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8399 args.dir = NFS4_CDFC4_FORE;
8400
8401 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
8402 if (xprt != rcu_access_pointer(clnt->cl_xprt))
8403 args.dir = NFS4_CDFC4_FORE;
8404
8405 task = rpc_run_task(&task_setup_data);
8406 if (!IS_ERR(task)) {
8407 status = task->tk_status;
8408 rpc_put_task(task);
8409 } else
8410 status = PTR_ERR(task);
8411 trace_nfs4_bind_conn_to_session(clp, status);
8412 if (status == 0) {
8413 if (memcmp(res.sessionid.data,
8414 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8415 dprintk("NFS: %s: Session ID mismatch\n", __func__);
8416 return -EIO;
8417 }
8418 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8419 dprintk("NFS: %s: Unexpected direction from server\n",
8420 __func__);
8421 return -EIO;
8422 }
8423 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8424 dprintk("NFS: %s: Server returned RDMA mode = true\n",
8425 __func__);
8426 return -EIO;
8427 }
8428 }
8429
8430 return status;
8431 }
8432
8433 struct rpc_bind_conn_calldata {
8434 struct nfs_client *clp;
8435 const struct cred *cred;
8436 };
8437
8438 static int
8439 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8440 struct rpc_xprt *xprt,
8441 void *calldata)
8442 {
8443 struct rpc_bind_conn_calldata *p = calldata;
8444
8445 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8446 }
8447
8448 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8449 {
8450 struct rpc_bind_conn_calldata data = {
8451 .clp = clp,
8452 .cred = cred,
8453 };
8454 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8455 nfs4_proc_bind_conn_to_session_callback, &data);
8456 }
8457
8458 /*
8459 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
8460 * plus the operations we'd like to see in the allow map to enable certain features.
8461 */
8462 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8463 .how = SP4_MACH_CRED,
8464 .enforce.u.words = {
8465 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8466 1 << (OP_EXCHANGE_ID - 32) |
8467 1 << (OP_CREATE_SESSION - 32) |
8468 1 << (OP_DESTROY_SESSION - 32) |
8469 1 << (OP_DESTROY_CLIENTID - 32)
8470 },
8471 .allow.u.words = {
8472 [0] = 1 << (OP_CLOSE) |
8473 1 << (OP_OPEN_DOWNGRADE) |
8474 1 << (OP_LOCKU) |
8475 1 << (OP_DELEGRETURN) |
8476 1 << (OP_COMMIT),
8477 [1] = 1 << (OP_SECINFO - 32) |
8478 1 << (OP_SECINFO_NO_NAME - 32) |
8479 1 << (OP_LAYOUTRETURN - 32) |
8480 1 << (OP_TEST_STATEID - 32) |
8481 1 << (OP_FREE_STATEID - 32) |
8482 1 << (OP_WRITE - 32)
8483 }
8484 };
8485
8486 /*
8487 * Select the state protection mode for client `clp' given the server results
8488 * from exchange_id in `sp'.
8489 *
8490 * Returns 0 on success, negative errno otherwise.
8491 */
8492 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8493 struct nfs41_state_protection *sp)
8494 {
8495 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8496 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8497 1 << (OP_EXCHANGE_ID - 32) |
8498 1 << (OP_CREATE_SESSION - 32) |
8499 1 << (OP_DESTROY_SESSION - 32) |
8500 1 << (OP_DESTROY_CLIENTID - 32)
8501 };
8502 unsigned long flags = 0;
8503 unsigned int i;
8504 int ret = 0;
8505
8506 if (sp->how == SP4_MACH_CRED) {
8507 /* Print state protect result */
8508 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8509 for (i = 0; i <= LAST_NFS4_OP; i++) {
8510 if (test_bit(i, sp->enforce.u.longs))
8511 dfprintk(MOUNT, " enforce op %d\n", i);
8512 if (test_bit(i, sp->allow.u.longs))
8513 dfprintk(MOUNT, " allow op %d\n", i);
8514 }
8515
8516 /* make sure nothing is on enforce list that isn't supported */
8517 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8518 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8519 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8520 ret = -EINVAL;
8521 goto out;
8522 }
8523 }
8524
8525 /*
8526 * Minimal mode - state operations are allowed to use machine
8527 * credential. Note this already happens by default, so the
8528 * client doesn't have to do anything more than the negotiation.
8529 *
8530 * NOTE: we don't care if EXCHANGE_ID is in the list -
8531 * we're already using the machine cred for exchange_id
8532 * and will never use a different cred.
8533 */
8534 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8535 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8536 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8537 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8538 dfprintk(MOUNT, "sp4_mach_cred:\n");
8539 dfprintk(MOUNT, " minimal mode enabled\n");
8540 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8541 } else {
8542 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8543 ret = -EINVAL;
8544 goto out;
8545 }
8546
8547 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8548 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8549 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8550 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8551 dfprintk(MOUNT, " cleanup mode enabled\n");
8552 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8553 }
8554
8555 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8556 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8557 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8558 }
8559
8560 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8561 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8562 dfprintk(MOUNT, " secinfo mode enabled\n");
8563 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8564 }
8565
8566 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8567 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8568 dfprintk(MOUNT, " stateid mode enabled\n");
8569 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8570 }
8571
8572 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8573 dfprintk(MOUNT, " write mode enabled\n");
8574 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8575 }
8576
8577 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8578 dfprintk(MOUNT, " commit mode enabled\n");
8579 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8580 }
8581 }
8582 out:
8583 clp->cl_sp4_flags = flags;
8584 return ret;
8585 }
8586
8587 struct nfs41_exchange_id_data {
8588 struct nfs41_exchange_id_res res;
8589 struct nfs41_exchange_id_args args;
8590 };
8591
8592 static void nfs4_exchange_id_release(void *data)
8593 {
8594 struct nfs41_exchange_id_data *cdata =
8595 (struct nfs41_exchange_id_data *)data;
8596
8597 nfs_put_client(cdata->args.client);
8598 kfree(cdata->res.impl_id);
8599 kfree(cdata->res.server_scope);
8600 kfree(cdata->res.server_owner);
8601 kfree(cdata);
8602 }
8603
8604 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8605 .rpc_release = nfs4_exchange_id_release,
8606 };
8607
8608 /*
8609 * nfs4_run_exchange_id()
8610 *
8611 * Set up and start the EXCHANGE_ID RPC task; used by _nfs4_proc_exchange_id()
 * and by nfs4_test_session_trunk().
8612 */
8613 static struct rpc_task *
8614 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8615 u32 sp4_how, struct rpc_xprt *xprt)
8616 {
8617 struct rpc_message msg = {
8618 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8619 .rpc_cred = cred,
8620 };
8621 struct rpc_task_setup task_setup_data = {
8622 .rpc_client = clp->cl_rpcclient,
8623 .callback_ops = &nfs4_exchange_id_call_ops,
8624 .rpc_message = &msg,
8625 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8626 };
8627 struct nfs41_exchange_id_data *calldata;
8628 int status;
8629
8630 if (!refcount_inc_not_zero(&clp->cl_count))
8631 return ERR_PTR(-EIO);
8632
8633 status = -ENOMEM;
8634 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8635 if (!calldata)
8636 goto out;
8637
8638 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8639
8640 status = nfs4_init_uniform_client_string(clp);
8641 if (status)
8642 goto out_calldata;
8643
8644 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8645 GFP_NOFS);
8646 status = -ENOMEM;
8647 if (unlikely(calldata->res.server_owner == NULL))
8648 goto out_calldata;
8649
8650 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8651 GFP_NOFS);
8652 if (unlikely(calldata->res.server_scope == NULL))
8653 goto out_server_owner;
8654
8655 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8656 if (unlikely(calldata->res.impl_id == NULL))
8657 goto out_server_scope;
8658
8659 switch (sp4_how) {
8660 case SP4_NONE:
8661 calldata->args.state_protect.how = SP4_NONE;
8662 break;
8663
8664 case SP4_MACH_CRED:
8665 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8666 break;
8667
8668 default:
8669 /* unsupported! */
8670 WARN_ON_ONCE(1);
8671 status = -EINVAL;
8672 goto out_impl_id;
8673 }
8674 if (xprt) {
8675 task_setup_data.rpc_xprt = xprt;
8676 task_setup_data.flags |= RPC_TASK_SOFTCONN;
8677 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8678 sizeof(calldata->args.verifier.data));
8679 }
8680 calldata->args.client = clp;
8681 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8682 EXCHGID4_FLAG_BIND_PRINC_STATEID;
8683 #ifdef CONFIG_NFS_V4_1_MIGRATION
8684 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8685 #endif
8686 msg.rpc_argp = &calldata->args;
8687 msg.rpc_resp = &calldata->res;
8688 task_setup_data.callback_data = calldata;
8689
8690 return rpc_run_task(&task_setup_data);
8691
8692 out_impl_id:
8693 kfree(calldata->res.impl_id);
8694 out_server_scope:
8695 kfree(calldata->res.server_scope);
8696 out_server_owner:
8697 kfree(calldata->res.server_owner);
8698 out_calldata:
8699 kfree(calldata);
8700 out:
8701 nfs_put_client(clp);
8702 return ERR_PTR(status);
8703 }
8704
8705 /*
8706 * _nfs4_proc_exchange_id()
8707 *
8708 * Wrapper for EXCHANGE_ID operation.
8709 */
8710 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8711 u32 sp4_how)
8712 {
8713 struct rpc_task *task;
8714 struct nfs41_exchange_id_args *argp;
8715 struct nfs41_exchange_id_res *resp;
8716 unsigned long now = jiffies;
8717 int status;
8718
8719 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8720 if (IS_ERR(task))
8721 return PTR_ERR(task);
8722
8723 argp = task->tk_msg.rpc_argp;
8724 resp = task->tk_msg.rpc_resp;
8725 status = task->tk_status;
8726 if (status != 0)
8727 goto out;
8728
8729 status = nfs4_check_cl_exchange_flags(resp->flags,
8730 clp->cl_mvops->minor_version);
8731 if (status != 0)
8732 goto out;
8733
8734 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8735 if (status != 0)
8736 goto out;
8737
8738 do_renew_lease(clp, now);
8739
8740 clp->cl_clientid = resp->clientid;
8741 clp->cl_exchange_flags = resp->flags;
8742 clp->cl_seqid = resp->seqid;
8743 /* Client ID is not confirmed */
8744 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8745 clear_bit(NFS4_SESSION_ESTABLISHED,
8746 &clp->cl_session->session_state);
8747
8748 if (clp->cl_serverscope != NULL &&
8749 !nfs41_same_server_scope(clp->cl_serverscope,
8750 resp->server_scope)) {
8751 dprintk("%s: server_scope mismatch detected\n",
8752 __func__);
8753 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8754 }
8755
8756 swap(clp->cl_serverowner, resp->server_owner);
8757 swap(clp->cl_serverscope, resp->server_scope);
8758 swap(clp->cl_implid, resp->impl_id);
8759
8760 /* Save the EXCHANGE_ID verifier for later session trunking tests */
8761 memcpy(clp->cl_confirm.data, argp->verifier.data,
8762 sizeof(clp->cl_confirm.data));
8763 out:
8764 trace_nfs4_exchange_id(clp, status);
8765 rpc_put_task(task);
8766 return status;
8767 }
8768
8769 /*
8770 * nfs4_proc_exchange_id()
8771 *
8772 * Returns zero, a negative errno, or a negative NFS4ERR status code.
8773 *
8774 * Since the clientid has expired, all compounds using sessions
8775 * associated with the stale clientid will be returning
8776 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8777 * be in some phase of session reset.
8778 *
8779 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8780 */
8781 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8782 {
8783 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8784 int status;
8785
8786 /* try SP4_MACH_CRED if krb5i/p */
8787 if (authflavor == RPC_AUTH_GSS_KRB5I ||
8788 authflavor == RPC_AUTH_GSS_KRB5P) {
8789 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8790 if (!status)
8791 return 0;
8792 }
8793
8794 /* try SP4_NONE */
8795 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8796 }
8797
8798 /**
8799 * nfs4_test_session_trunk
8800 *
8801 * This is an add_xprt_test() test function called from
8802 * rpc_clnt_setup_test_and_add_xprt.
8803 *
8804 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
8805 * and is dereferenced in nfs4_exchange_id_release
8806 *
8807 * Upon success, add the new transport to the rpc_clnt
8808 *
8809 * @clnt: struct rpc_clnt to get new transport
8810 * @xprt: the rpc_xprt to test
8811 * @data: call data for _nfs4_proc_exchange_id.
8812 */
8813 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8814 void *data)
8815 {
8816 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
8817 struct rpc_task *task;
8818 int status;
8819
8820 u32 sp4_how;
8821
8822 dprintk("--> %s try %s\n", __func__,
8823 xprt->address_strings[RPC_DISPLAY_ADDR]);
8824
8825 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8826
8827 /* Test connection for session trunking. Async exchange_id call */
8828 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8829 if (IS_ERR(task))
8830 return;
8831
8832 status = task->tk_status;
8833 if (status == 0)
8834 status = nfs4_detect_session_trunking(adata->clp,
8835 task->tk_msg.rpc_resp, xprt);
8836
8837 if (status == 0)
8838 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8839
8840 rpc_put_task(task);
8841 }
8842 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8843
8844 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8845 const struct cred *cred)
8846 {
8847 struct rpc_message msg = {
8848 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8849 .rpc_argp = clp,
8850 .rpc_cred = cred,
8851 };
8852 int status;
8853
8854 status = rpc_call_sync(clp->cl_rpcclient, &msg,
8855 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
8856 trace_nfs4_destroy_clientid(clp, status);
8857 if (status)
8858 dprintk("NFS: Got error %d from the server %s on "
8859 "DESTROY_CLIENTID.", status, clp->cl_hostname);
8860 return status;
8861 }
8862
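/*
 * Retry DESTROY_CLIENTID while the server answers NFS4ERR_DELAY or
 * NFS4ERR_CLIENTID_BUSY, sleeping one second between attempts, up to
 * NFS4_MAX_LOOP_ON_RECOVER times.
 */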
8863 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
8864 const struct cred *cred)
8865 {
8866 unsigned int loop;
8867 int ret;
8868
8869 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
8870 ret = _nfs4_proc_destroy_clientid(clp, cred);
8871 switch (ret) {
8872 case -NFS4ERR_DELAY:
8873 case -NFS4ERR_CLIENTID_BUSY:
8874 ssleep(1);
8875 break;
8876 default:
8877 return ret;
8878 }
8879 }
8880 return 0;
8881 }
8882
8883 int nfs4_destroy_clientid(struct nfs_client *clp)
8884 {
8885 const struct cred *cred;
8886 int ret = 0;
8887
8888 if (clp->cl_mvops->minor_version < 1)
8889 goto out;
8890 if (clp->cl_exchange_flags == 0)
8891 goto out;
8892 if (clp->cl_preserve_clid)
8893 goto out;
8894 cred = nfs4_get_clid_cred(clp);
8895 ret = nfs4_proc_destroy_clientid(clp, cred);
8896 put_cred(cred);
8897 switch (ret) {
8898 case 0:
8899 case -NFS4ERR_STALE_CLIENTID:
8900 clp->cl_exchange_flags = 0;
8901 }
8902 out:
8903 return ret;
8904 }
8905
8906 #endif /* CONFIG_NFS_V4_1 */
8907
8908 struct nfs4_get_lease_time_data {
8909 struct nfs4_get_lease_time_args *args;
8910 struct nfs4_get_lease_time_res *res;
8911 struct nfs_client *clp;
8912 };
8913
8914 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
8915 void *calldata)
8916 {
8917 struct nfs4_get_lease_time_data *data =
8918 (struct nfs4_get_lease_time_data *)calldata;
8919
8920 dprintk("--> %s\n", __func__);
8921 /* just set up the sequence, do not trigger session recovery
8922 since we're invoked within one */
8923 nfs4_setup_sequence(data->clp,
8924 &data->args->la_seq_args,
8925 &data->res->lr_seq_res,
8926 task);
8927 dprintk("<-- %s\n", __func__);
8928 }
8929
8930 /*
8931 * Called from nfs4_state_manager thread for session setup, so don't recover
8932 * from sequence operation or clientid errors.
8933 */
8934 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
8935 {
8936 struct nfs4_get_lease_time_data *data =
8937 (struct nfs4_get_lease_time_data *)calldata;
8938
8939 dprintk("--> %s\n", __func__);
8940 if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
8941 return;
8942 switch (task->tk_status) {
8943 case -NFS4ERR_DELAY:
8944 case -NFS4ERR_GRACE:
8945 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
8946 rpc_delay(task, NFS4_POLL_RETRY_MIN);
8947 task->tk_status = 0;
8948 fallthrough;
8949 case -NFS4ERR_RETRY_UNCACHED_REP:
8950 rpc_restart_call_prepare(task);
8951 return;
8952 }
8953 dprintk("<-- %s\n", __func__);
8954 }
8955
8956 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
8957 .rpc_call_prepare = nfs4_get_lease_time_prepare,
8958 .rpc_call_done = nfs4_get_lease_time_done,
8959 };
8960
8961 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
8962 {
8963 struct nfs4_get_lease_time_args args;
8964 struct nfs4_get_lease_time_res res = {
8965 .lr_fsinfo = fsinfo,
8966 };
8967 struct nfs4_get_lease_time_data data = {
8968 .args = &args,
8969 .res = &res,
8970 .clp = clp,
8971 };
8972 struct rpc_message msg = {
8973 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
8974 .rpc_argp = &args,
8975 .rpc_resp = &res,
8976 };
8977 struct rpc_task_setup task_setup = {
8978 .rpc_client = clp->cl_rpcclient,
8979 .rpc_message = &msg,
8980 .callback_ops = &nfs4_get_lease_time_ops,
8981 .callback_data = &data,
8982 .flags = RPC_TASK_TIMEOUT,
8983 };
8984
8985 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
8986 return nfs4_call_sync_custom(&task_setup);
8987 }
8988
8989 #ifdef CONFIG_NFS_V4_1
8990
8991 /*
8992 * Initialize the values to be used by the client in CREATE_SESSION.
8993 * If nfs4_init_session set the fore channel request and response sizes,
8994 * use them.
8995 *
8996 * Set the back channel max_resp_sz_cached to zero to force the client to
8997 * always set csa_cachethis to FALSE because the current implementation
8998 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
8999 */
9000 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
9001 struct rpc_clnt *clnt)
9002 {
9003 unsigned int max_rqst_sz, max_resp_sz;
9004 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
9005 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
9006
9007 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
9008 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
9009
9010 /* Fore channel attributes */
9011 args->fc_attrs.max_rqst_sz = max_rqst_sz;
9012 args->fc_attrs.max_resp_sz = max_resp_sz;
9013 args->fc_attrs.max_ops = NFS4_MAX_OPS;
9014 args->fc_attrs.max_reqs = max_session_slots;
9015
9016 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
9017 "max_ops=%u max_reqs=%u\n",
9018 __func__,
9019 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
9020 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
9021
9022 /* Back channel attributes */
9023 args->bc_attrs.max_rqst_sz = max_bc_payload;
9024 args->bc_attrs.max_resp_sz = max_bc_payload;
9025 args->bc_attrs.max_resp_sz_cached = 0;
9026 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
9027 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
9028 if (args->bc_attrs.max_reqs > max_bc_slots)
9029 args->bc_attrs.max_reqs = max_bc_slots;
9030
9031 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
9032 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
9033 __func__,
9034 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
9035 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
9036 args->bc_attrs.max_reqs);
9037 }
9038
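/*
 * Sanity-check the fore channel attributes the server returned: it must not
 * grow our maximum reply size or shrink max_ops below what we requested, and
 * max_reqs is clamped to NFS4_MAX_SLOT_TABLE.
 */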
9039 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
9040 struct nfs41_create_session_res *res)
9041 {
9042 struct nfs4_channel_attrs *sent = &args->fc_attrs;
9043 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
9044
9045 if (rcvd->max_resp_sz > sent->max_resp_sz)
9046 return -EINVAL;
9047 /*
9048 * Our requested max_ops is the minimum we need; we're not
9049 * prepared to break up compounds into smaller pieces than that.
9050 * So, no point even trying to continue if the server won't
9051 * cooperate:
9052 */
9053 if (rcvd->max_ops < sent->max_ops)
9054 return -EINVAL;
9055 if (rcvd->max_reqs == 0)
9056 return -EINVAL;
9057 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
9058 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
9059 return 0;
9060 }
9061
9062 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
9063 struct nfs41_create_session_res *res)
9064 {
9065 struct nfs4_channel_attrs *sent = &args->bc_attrs;
9066 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
9067
9068 if (!(res->flags & SESSION4_BACK_CHAN))
9069 goto out;
9070 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
9071 return -EINVAL;
9072 if (rcvd->max_resp_sz < sent->max_resp_sz)
9073 return -EINVAL;
9074 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
9075 return -EINVAL;
9076 if (rcvd->max_ops > sent->max_ops)
9077 return -EINVAL;
9078 if (rcvd->max_reqs > sent->max_reqs)
9079 return -EINVAL;
9080 out:
9081 return 0;
9082 }
9083
9084 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
9085 struct nfs41_create_session_res *res)
9086 {
9087 int ret;
9088
9089 ret = nfs4_verify_fore_channel_attrs(args, res);
9090 if (ret)
9091 return ret;
9092 return nfs4_verify_back_channel_attrs(args, res);
9093 }
9094
9095 static void nfs4_update_session(struct nfs4_session *session,
9096 struct nfs41_create_session_res *res)
9097 {
9098 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
9099 /* Mark client id and session as being confirmed */
9100 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
9101 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
9102 session->flags = res->flags;
9103 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
9104 if (res->flags & SESSION4_BACK_CHAN)
9105 memcpy(&session->bc_attrs, &res->bc_attrs,
9106 sizeof(session->bc_attrs));
9107 }
9108
9109 static int _nfs4_proc_create_session(struct nfs_client *clp,
9110 const struct cred *cred)
9111 {
9112 struct nfs4_session *session = clp->cl_session;
9113 struct nfs41_create_session_args args = {
9114 .client = clp,
9115 .clientid = clp->cl_clientid,
9116 .seqid = clp->cl_seqid,
9117 .cb_program = NFS4_CALLBACK,
9118 };
9119 struct nfs41_create_session_res res;
9120
9121 struct rpc_message msg = {
9122 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
9123 .rpc_argp = &args,
9124 .rpc_resp = &res,
9125 .rpc_cred = cred,
9126 };
9127 int status;
9128
9129 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
9130 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
9131
9132 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9133 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9134 trace_nfs4_create_session(clp, status);
9135
9136 switch (status) {
9137 case -NFS4ERR_STALE_CLIENTID:
9138 case -NFS4ERR_DELAY:
9139 case -ETIMEDOUT:
9140 case -EACCES:
9141 case -EAGAIN:
9142 goto out;
9143 }
9144
9145 clp->cl_seqid++;
9146 if (!status) {
9147 /* Verify the session's negotiated channel_attrs values */
9148 status = nfs4_verify_channel_attrs(&args, &res);
9149 /* Increment the clientid slot sequence id */
9150 if (status)
9151 goto out;
9152 nfs4_update_session(session, &res);
9153 }
9154 out:
9155 return status;
9156 }
9157
9158 /*
9159 * Issues a CREATE_SESSION operation to the server.
9160 * It is the responsibility of the caller to verify the session is
9161 * expired before calling this routine.
9162 */
9163 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9164 {
9165 int status;
9166 unsigned *ptr;
9167 struct nfs4_session *session = clp->cl_session;
9168
9169 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9170
9171 status = _nfs4_proc_create_session(clp, cred);
9172 if (status)
9173 goto out;
9174
9175 /* Init or reset the session slot tables */
9176 status = nfs4_setup_session_slot_tables(session);
9177 dprintk("slot table setup returned %d\n", status);
9178 if (status)
9179 goto out;
9180
9181 ptr = (unsigned *)&session->sess_id.data[0];
9182 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9183 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9184 out:
9185 dprintk("<-- %s\n", __func__);
9186 return status;
9187 }
9188
9189 /*
9190 * Issue the over-the-wire RPC DESTROY_SESSION.
9191 * The caller must serialize access to this routine.
9192 */
9193 int nfs4_proc_destroy_session(struct nfs4_session *session,
9194 const struct cred *cred)
9195 {
9196 struct rpc_message msg = {
9197 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9198 .rpc_argp = session,
9199 .rpc_cred = cred,
9200 };
9201 int status = 0;
9202
9203 dprintk("--> nfs4_proc_destroy_session\n");
9204
9205 /* session is still being setup */
9206 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9207 return 0;
9208
9209 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9210 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9211 trace_nfs4_destroy_session(session->clp, status);
9212
9213 if (status)
9214 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9215 "Session has been destroyed regardless...\n", status);
9216
9217 dprintk("<-- nfs4_proc_destroy_session\n");
9218 return status;
9219 }
9220
9221 /*
9222 * Renew the cl_session lease.
9223 */
9224 struct nfs4_sequence_data {
9225 struct nfs_client *clp;
9226 struct nfs4_sequence_args args;
9227 struct nfs4_sequence_res res;
9228 };
9229
9230 static void nfs41_sequence_release(void *data)
9231 {
9232 struct nfs4_sequence_data *calldata = data;
9233 struct nfs_client *clp = calldata->clp;
9234
9235 if (refcount_read(&clp->cl_count) > 1)
9236 nfs4_schedule_state_renewal(clp);
9237 nfs_put_client(clp);
9238 kfree(calldata);
9239 }
9240
9241 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9242 {
9243 	switch (task->tk_status) {
9244 case -NFS4ERR_DELAY:
9245 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9246 return -EAGAIN;
9247 default:
9248 nfs4_schedule_lease_recovery(clp);
9249 }
9250 return 0;
9251 }
9252
9253 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9254 {
9255 struct nfs4_sequence_data *calldata = data;
9256 struct nfs_client *clp = calldata->clp;
9257
9258 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9259 return;
9260
9261 trace_nfs4_sequence(clp, task->tk_status);
9262 if (task->tk_status < 0) {
9263 dprintk("%s ERROR %d\n", __func__, task->tk_status);
9264 if (refcount_read(&clp->cl_count) == 1)
9265 goto out;
9266
9267 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9268 rpc_restart_call_prepare(task);
9269 return;
9270 }
9271 }
9272 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9273 out:
9274 dprintk("<-- %s\n", __func__);
9275 }
9276
9277 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9278 {
9279 struct nfs4_sequence_data *calldata = data;
9280 struct nfs_client *clp = calldata->clp;
9281 struct nfs4_sequence_args *args;
9282 struct nfs4_sequence_res *res;
9283
9284 args = task->tk_msg.rpc_argp;
9285 res = task->tk_msg.rpc_resp;
9286
9287 nfs4_setup_sequence(clp, args, res, task);
9288 }
9289
9290 static const struct rpc_call_ops nfs41_sequence_ops = {
9291 .rpc_call_done = nfs41_sequence_call_done,
9292 .rpc_call_prepare = nfs41_sequence_prepare,
9293 .rpc_release = nfs41_sequence_release,
9294 };
9295
9296 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9297 const struct cred *cred,
9298 struct nfs4_slot *slot,
9299 bool is_privileged)
9300 {
9301 struct nfs4_sequence_data *calldata;
9302 struct rpc_message msg = {
9303 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9304 .rpc_cred = cred,
9305 };
9306 struct rpc_task_setup task_setup_data = {
9307 .rpc_client = clp->cl_rpcclient,
9308 .rpc_message = &msg,
9309 .callback_ops = &nfs41_sequence_ops,
9310 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
9311 };
9312 struct rpc_task *ret;
9313
9314 ret = ERR_PTR(-EIO);
9315 if (!refcount_inc_not_zero(&clp->cl_count))
9316 goto out_err;
9317
9318 ret = ERR_PTR(-ENOMEM);
9319 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9320 if (calldata == NULL)
9321 goto out_put_clp;
9322 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9323 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9324 msg.rpc_argp = &calldata->args;
9325 msg.rpc_resp = &calldata->res;
9326 calldata->clp = clp;
9327 task_setup_data.callback_data = calldata;
9328
9329 ret = rpc_run_task(&task_setup_data);
9330 if (IS_ERR(ret))
9331 goto out_err;
9332 return ret;
9333 out_put_clp:
9334 nfs_put_client(clp);
9335 out_err:
9336 nfs41_release_slot(slot);
9337 return ret;
9338 }
9339
9340 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9341 {
9342 struct rpc_task *task;
9343 int ret = 0;
9344
9345 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9346 return -EAGAIN;
9347 task = _nfs41_proc_sequence(clp, cred, NULL, false);
9348 if (IS_ERR(task))
9349 ret = PTR_ERR(task);
9350 else
9351 rpc_put_task_async(task);
9352 dprintk("<-- %s status=%d\n", __func__, ret);
9353 return ret;
9354 }
9355
9356 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9357 {
9358 struct rpc_task *task;
9359 int ret;
9360
9361 task = _nfs41_proc_sequence(clp, cred, NULL, true);
9362 if (IS_ERR(task)) {
9363 ret = PTR_ERR(task);
9364 goto out;
9365 }
9366 ret = rpc_wait_for_completion_task(task);
9367 if (!ret)
9368 ret = task->tk_status;
9369 rpc_put_task(task);
9370 out:
9371 dprintk("<-- %s status=%d\n", __func__, ret);
9372 return ret;
9373 }
9374
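/*
 * Illustrative note: the two entry points above feed the lease renewal
 * machinery in different ways.  nfs41_proc_async_sequence() fires a
 * one-shot asynchronous SEQUENCE and is wired up as .sched_state_renewal
 * in nfs41_state_renewal_ops (see the bottom of this file), while
 * nfs4_proc_sequence() waits for the reply and serves as the synchronous
 * .renew_lease operation.  A hedged sketch of one renewal pass, assuming
 * a valid machine credential:
 *
 *	const struct cred *cred = nfs4_get_machine_cred(clp);
 *	if (cred != NULL) {
 *		nfs41_proc_async_sequence(clp, cred, NFS4_RENEW_TIMEOUT);
 *		put_cred(cred);
 *	}
 */
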
9375 struct nfs4_reclaim_complete_data {
9376 struct nfs_client *clp;
9377 struct nfs41_reclaim_complete_args arg;
9378 struct nfs41_reclaim_complete_res res;
9379 };
9380
9381 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9382 {
9383 struct nfs4_reclaim_complete_data *calldata = data;
9384
9385 nfs4_setup_sequence(calldata->clp,
9386 &calldata->arg.seq_args,
9387 &calldata->res.seq_res,
9388 task);
9389 }
9390
9391 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9392 {
9393 switch(task->tk_status) {
9394 case 0:
9395 wake_up_all(&clp->cl_lock_waitq);
9396 fallthrough;
9397 case -NFS4ERR_COMPLETE_ALREADY:
9398 case -NFS4ERR_WRONG_CRED: /* What to do here? */
9399 break;
9400 case -NFS4ERR_DELAY:
9401 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9402 fallthrough;
9403 case -NFS4ERR_RETRY_UNCACHED_REP:
9404 return -EAGAIN;
9405 case -NFS4ERR_BADSESSION:
9406 case -NFS4ERR_DEADSESSION:
9407 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9408 break;
9409 default:
9410 nfs4_schedule_lease_recovery(clp);
9411 }
9412 return 0;
9413 }
9414
9415 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9416 {
9417 struct nfs4_reclaim_complete_data *calldata = data;
9418 struct nfs_client *clp = calldata->clp;
9419 struct nfs4_sequence_res *res = &calldata->res.seq_res;
9420
9421 dprintk("--> %s\n", __func__);
9422 if (!nfs41_sequence_done(task, res))
9423 return;
9424
9425 trace_nfs4_reclaim_complete(clp, task->tk_status);
9426 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9427 rpc_restart_call_prepare(task);
9428 return;
9429 }
9430 dprintk("<-- %s\n", __func__);
9431 }
9432
9433 static void nfs4_free_reclaim_complete_data(void *data)
9434 {
9435 struct nfs4_reclaim_complete_data *calldata = data;
9436
9437 kfree(calldata);
9438 }
9439
9440 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9441 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9442 .rpc_call_done = nfs4_reclaim_complete_done,
9443 .rpc_release = nfs4_free_reclaim_complete_data,
9444 };
9445
9446 /*
9447 * Issue a global reclaim complete.
9448 */
9449 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9450 const struct cred *cred)
9451 {
9452 struct nfs4_reclaim_complete_data *calldata;
9453 struct rpc_message msg = {
9454 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9455 .rpc_cred = cred,
9456 };
9457 struct rpc_task_setup task_setup_data = {
9458 .rpc_client = clp->cl_rpcclient,
9459 .rpc_message = &msg,
9460 .callback_ops = &nfs4_reclaim_complete_call_ops,
9461 .flags = RPC_TASK_NO_ROUND_ROBIN,
9462 };
9463 int status = -ENOMEM;
9464
9465 dprintk("--> %s\n", __func__);
9466 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9467 if (calldata == NULL)
9468 goto out;
9469 calldata->clp = clp;
9470 calldata->arg.one_fs = 0;
9471
9472 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9473 msg.rpc_argp = &calldata->arg;
9474 msg.rpc_resp = &calldata->res;
9475 task_setup_data.callback_data = calldata;
9476 status = nfs4_call_sync_custom(&task_setup_data);
9477 out:
9478 dprintk("<-- %s status=%d\n", __func__, status);
9479 return status;
9480 }
9481
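/*
 * Illustrative note: calldata->arg.one_fs is cleared above, so the
 * RECLAIM_COMPLETE sent here is the global variant (rca_one_fs = false
 * in RFC 5661 terms), covering every filesystem on the server rather
 * than a single migrated one -- hence "global reclaim complete".
 */
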
9482 static void
9483 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9484 {
9485 struct nfs4_layoutget *lgp = calldata;
9486 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9487
9488 dprintk("--> %s\n", __func__);
9489 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9490 &lgp->res.seq_res, task);
9491 dprintk("<-- %s\n", __func__);
9492 }
9493
9494 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9495 {
9496 struct nfs4_layoutget *lgp = calldata;
9497
9498 dprintk("--> %s\n", __func__);
9499 nfs41_sequence_process(task, &lgp->res.seq_res);
9500 dprintk("<-- %s\n", __func__);
9501 }
9502
9503 static int
9504 nfs4_layoutget_handle_exception(struct rpc_task *task,
9505 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9506 {
9507 struct inode *inode = lgp->args.inode;
9508 struct nfs_server *server = NFS_SERVER(inode);
9509 struct pnfs_layout_hdr *lo = lgp->lo;
9510 int nfs4err = task->tk_status;
9511 int err, status = 0;
9512 LIST_HEAD(head);
9513
9514 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9515
9516 nfs4_sequence_free_slot(&lgp->res.seq_res);
9517
9518 switch (nfs4err) {
9519 case 0:
9520 goto out;
9521
9522 /*
9523 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9524 * on the file. Set tk_status to -ENODATA to tell the upper layer to
9525 * retry the I/O in-band (through the MDS).
9526 */
9527 case -NFS4ERR_LAYOUTUNAVAILABLE:
9528 status = -ENODATA;
9529 goto out;
9530 /*
9531 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of at
9532 * least lgp->args.minlength bytes when minlength != 0 (see RFC5661 section 18.43.3).
9533 */
9534 case -NFS4ERR_BADLAYOUT:
9535 status = -EOVERFLOW;
9536 goto out;
9537 /*
9538 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9539 * (or clients) writing to the same RAID stripe except when
9540 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9541 *
9542 * Treat it like we would RECALLCONFLICT -- we retry for a little
9543 * while, and then eventually give up.
9544 */
9545 case -NFS4ERR_LAYOUTTRYLATER:
9546 if (lgp->args.minlength == 0) {
9547 status = -EOVERFLOW;
9548 goto out;
9549 }
9550 status = -EBUSY;
9551 break;
9552 case -NFS4ERR_RECALLCONFLICT:
9553 status = -ERECALLCONFLICT;
9554 break;
9555 case -NFS4ERR_DELEG_REVOKED:
9556 case -NFS4ERR_ADMIN_REVOKED:
9557 case -NFS4ERR_EXPIRED:
9558 case -NFS4ERR_BAD_STATEID:
9559 exception->timeout = 0;
9560 spin_lock(&inode->i_lock);
9561 /* If the open stateid was bad, then recover it. */
9562 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9563 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9564 spin_unlock(&inode->i_lock);
9565 exception->state = lgp->args.ctx->state;
9566 exception->stateid = &lgp->args.stateid;
9567 break;
9568 }
9569
9570 /*
9571 * Mark the bad layout state as invalid, then retry
9572 */
9573 pnfs_mark_layout_stateid_invalid(lo, &head);
9574 spin_unlock(&inode->i_lock);
9575 nfs_commit_inode(inode, 0);
9576 pnfs_free_lseg_list(&head);
9577 status = -EAGAIN;
9578 goto out;
9579 }
9580
9581 err = nfs4_handle_exception(server, nfs4err, exception);
9582 if (!status) {
9583 if (exception->retry)
9584 status = -EAGAIN;
9585 else
9586 status = err;
9587 }
9588 out:
9589 dprintk("<-- %s\n", __func__);
9590 return status;
9591 }
9592
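/*
 * Roughly, the NFS4ERR -> errno mapping performed above is:
 *
 *	NFS4ERR_LAYOUTUNAVAILABLE              -> -ENODATA (fall back to MDS I/O)
 *	NFS4ERR_BADLAYOUT                      -> -EOVERFLOW
 *	NFS4ERR_LAYOUTTRYLATER, minlength == 0 -> -EOVERFLOW
 *	NFS4ERR_LAYOUTTRYLATER                 -> -EBUSY (retry for a while)
 *	NFS4ERR_RECALLCONFLICT                 -> -ERECALLCONFLICT
 *	revoked/expired/bad stateid            -> recover the stateid or
 *	                                          invalidate the layout, then -EAGAIN
 *	anything else                          -> whatever nfs4_handle_exception()
 *	                                          decides (-EAGAIN when retrying)
 */
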
9593 size_t max_response_pages(struct nfs_server *server)
9594 {
9595 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9596 return nfs_page_array_len(0, max_resp_sz);
9597 }
9598
9599 static void nfs4_layoutget_release(void *calldata)
9600 {
9601 struct nfs4_layoutget *lgp = calldata;
9602
9603 dprintk("--> %s\n", __func__);
9604 nfs4_sequence_free_slot(&lgp->res.seq_res);
9605 pnfs_layoutget_free(lgp);
9606 dprintk("<-- %s\n", __func__);
9607 }
9608
9609 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9610 .rpc_call_prepare = nfs4_layoutget_prepare,
9611 .rpc_call_done = nfs4_layoutget_done,
9612 .rpc_release = nfs4_layoutget_release,
9613 };
9614
9615 struct pnfs_layout_segment *
9616 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
9617 {
9618 struct inode *inode = lgp->args.inode;
9619 struct nfs_server *server = NFS_SERVER(inode);
9620 struct rpc_task *task;
9621 struct rpc_message msg = {
9622 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9623 .rpc_argp = &lgp->args,
9624 .rpc_resp = &lgp->res,
9625 .rpc_cred = lgp->cred,
9626 };
9627 struct rpc_task_setup task_setup_data = {
9628 .rpc_client = server->client,
9629 .rpc_message = &msg,
9630 .callback_ops = &nfs4_layoutget_call_ops,
9631 .callback_data = lgp,
9632 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
9633 RPC_TASK_MOVEABLE,
9634 };
9635 struct pnfs_layout_segment *lseg = NULL;
9636 struct nfs4_exception exception = {
9637 .inode = inode,
9638 .timeout = *timeout,
9639 };
9640 int status = 0;
9641
9642 dprintk("--> %s\n", __func__);
9643
9644 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9645
9646 task = rpc_run_task(&task_setup_data);
9647
9648 status = rpc_wait_for_completion_task(task);
9649 if (status != 0)
9650 goto out;
9651
9652 if (task->tk_status < 0) {
9653 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
9654 *timeout = exception.timeout;
9655 } else if (lgp->res.layoutp->len == 0) {
9656 status = -EAGAIN;
9657 *timeout = nfs4_update_delay(&exception.timeout);
9658 } else
9659 lseg = pnfs_layout_process(lgp);
9660 out:
9661 trace_nfs4_layoutget(lgp->args.ctx,
9662 &lgp->args.range,
9663 &lgp->res.range,
9664 &lgp->res.stateid,
9665 status);
9666
9667 rpc_put_task(task);
9668 dprintk("<-- %s status=%d\n", __func__, status);
9669 if (status)
9670 return ERR_PTR(status);
9671 return lseg;
9672 }
9673
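/*
 * Illustrative caller sketch (assuming a freshly prepared struct
 * nfs4_layoutget): on failure this returns an ERR_PTR() and may update
 * *timeout to tell the caller how long to back off before retrying.
 *
 *	struct pnfs_layout_segment *lseg;
 *	long timeout = 0;
 *
 *	lseg = nfs4_proc_layoutget(lgp, &timeout);
 *	if (IS_ERR(lseg) && PTR_ERR(lseg) == -EAGAIN) {
 *		// lgp was consumed by the rpc_release callback; back off
 *		// for "timeout", rebuild the layoutget arguments and retry
 *	}
 */
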
9674 static void
9675 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9676 {
9677 struct nfs4_layoutreturn *lrp = calldata;
9678
9679 dprintk("--> %s\n", __func__);
9680 nfs4_setup_sequence(lrp->clp,
9681 &lrp->args.seq_args,
9682 &lrp->res.seq_res,
9683 task);
9684 if (!pnfs_layout_is_valid(lrp->args.layout))
9685 rpc_exit(task, 0);
9686 }
9687
9688 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9689 {
9690 struct nfs4_layoutreturn *lrp = calldata;
9691 struct nfs_server *server;
9692
9693 dprintk("--> %s\n", __func__);
9694
9695 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9696 return;
9697
9698 /*
9699 * Was there an RPC-level error? Assume the call succeeded,
9700 * and that we need to release the layout.
9701 */
9702 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9703 lrp->res.lrs_present = 0;
9704 return;
9705 }
9706
9707 server = NFS_SERVER(lrp->args.inode);
9708 switch (task->tk_status) {
9709 case -NFS4ERR_OLD_STATEID:
9710 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
9711 &lrp->args.range,
9712 lrp->args.inode))
9713 goto out_restart;
9714 fallthrough;
9715 default:
9716 task->tk_status = 0;
9717 fallthrough;
9718 case 0:
9719 break;
9720 case -NFS4ERR_DELAY:
9721 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9722 break;
9723 goto out_restart;
9724 }
9725 dprintk("<-- %s\n", __func__);
9726 return;
9727 out_restart:
9728 task->tk_status = 0;
9729 nfs4_sequence_free_slot(&lrp->res.seq_res);
9730 rpc_restart_call_prepare(task);
9731 }
9732
9733 static void nfs4_layoutreturn_release(void *calldata)
9734 {
9735 struct nfs4_layoutreturn *lrp = calldata;
9736 struct pnfs_layout_hdr *lo = lrp->args.layout;
9737
9738 dprintk("--> %s\n", __func__);
9739 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9740 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9741 nfs4_sequence_free_slot(&lrp->res.seq_res);
9742 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9743 lrp->ld_private.ops->free(&lrp->ld_private);
9744 pnfs_put_layout_hdr(lrp->args.layout);
9745 nfs_iput_and_deactive(lrp->inode);
9746 put_cred(lrp->cred);
9747 kfree(calldata);
9748 dprintk("<-- %s\n", __func__);
9749 }
9750
9751 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9752 .rpc_call_prepare = nfs4_layoutreturn_prepare,
9753 .rpc_call_done = nfs4_layoutreturn_done,
9754 .rpc_release = nfs4_layoutreturn_release,
9755 };
9756
9757 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9758 {
9759 struct rpc_task *task;
9760 struct rpc_message msg = {
9761 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9762 .rpc_argp = &lrp->args,
9763 .rpc_resp = &lrp->res,
9764 .rpc_cred = lrp->cred,
9765 };
9766 struct rpc_task_setup task_setup_data = {
9767 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
9768 .rpc_message = &msg,
9769 .callback_ops = &nfs4_layoutreturn_call_ops,
9770 .callback_data = lrp,
9771 .flags = RPC_TASK_MOVEABLE,
9772 };
9773 int status = 0;
9774
9775 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9776 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9777 &task_setup_data.rpc_client, &msg);
9778
9779 dprintk("--> %s\n", __func__);
9780 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
9781 if (!sync) {
9782 if (!lrp->inode) {
9783 nfs4_layoutreturn_release(lrp);
9784 return -EAGAIN;
9785 }
9786 task_setup_data.flags |= RPC_TASK_ASYNC;
9787 }
9788 if (!lrp->inode)
9789 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9790 1);
9791 else
9792 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9793 0);
9794 task = rpc_run_task(&task_setup_data);
9795 if (IS_ERR(task))
9796 return PTR_ERR(task);
9797 if (sync)
9798 status = task->tk_status;
9799 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9800 dprintk("<-- %s status=%d\n", __func__, status);
9801 rpc_put_task(task);
9802 return status;
9803 }
9804
9805 static int
9806 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
9807 struct pnfs_device *pdev,
9808 const struct cred *cred)
9809 {
9810 struct nfs4_getdeviceinfo_args args = {
9811 .pdev = pdev,
9812 .notify_types = NOTIFY_DEVICEID4_CHANGE |
9813 NOTIFY_DEVICEID4_DELETE,
9814 };
9815 struct nfs4_getdeviceinfo_res res = {
9816 .pdev = pdev,
9817 };
9818 struct rpc_message msg = {
9819 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9820 .rpc_argp = &args,
9821 .rpc_resp = &res,
9822 .rpc_cred = cred,
9823 };
9824 int status;
9825
9826 dprintk("--> %s\n", __func__);
9827 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9828 if (res.notification & ~args.notify_types)
9829 dprintk("%s: unsupported notification\n", __func__);
9830 if (res.notification != args.notify_types)
9831 pdev->nocache = 1;
9832
9833 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
9834
9835 dprintk("<-- %s status=%d\n", __func__, status);
9836
9837 return status;
9838 }
9839
9840 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9841 struct pnfs_device *pdev,
9842 const struct cred *cred)
9843 {
9844 struct nfs4_exception exception = { };
9845 int err;
9846
9847 do {
9848 err = nfs4_handle_exception(server,
9849 _nfs4_proc_getdeviceinfo(server, pdev, cred),
9850 &exception);
9851 } while (exception.retry);
9852 return err;
9853 }
9854 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
9855
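/*
 * Illustrative note: the wrapper above uses the retry idiom found
 * throughout this file -- run the raw operation, feed any error to
 * nfs4_handle_exception(), and loop while exception.retry is set (the
 * handler typically sleeps on NFS4ERR_DELAY and kicks off state or
 * session recovery before asking for another attempt).
 */
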
9856 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9857 {
9858 struct nfs4_layoutcommit_data *data = calldata;
9859 struct nfs_server *server = NFS_SERVER(data->args.inode);
9860
9861 nfs4_setup_sequence(server->nfs_client,
9862 &data->args.seq_args,
9863 &data->res.seq_res,
9864 task);
9865 }
9866
9867 static void
9868 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9869 {
9870 struct nfs4_layoutcommit_data *data = calldata;
9871 struct nfs_server *server = NFS_SERVER(data->args.inode);
9872
9873 if (!nfs41_sequence_done(task, &data->res.seq_res))
9874 return;
9875
9876 switch (task->tk_status) { /* Just ignore these failures */
9877 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9878 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
9879 case -NFS4ERR_BADLAYOUT: /* no layout */
9880 case -NFS4ERR_GRACE: /* loca_reclaim always false */
9881 task->tk_status = 0;
9882 break;
9883 case 0:
9884 break;
9885 default:
9886 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9887 rpc_restart_call_prepare(task);
9888 return;
9889 }
9890 }
9891 }
9892
9893 static void nfs4_layoutcommit_release(void *calldata)
9894 {
9895 struct nfs4_layoutcommit_data *data = calldata;
9896
9897 pnfs_cleanup_layoutcommit(data);
9898 nfs_post_op_update_inode_force_wcc(data->args.inode,
9899 data->res.fattr);
9900 put_cred(data->cred);
9901 nfs_iput_and_deactive(data->inode);
9902 kfree(data);
9903 }
9904
9905 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9906 .rpc_call_prepare = nfs4_layoutcommit_prepare,
9907 .rpc_call_done = nfs4_layoutcommit_done,
9908 .rpc_release = nfs4_layoutcommit_release,
9909 };
9910
9911 int
9912 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9913 {
9914 struct rpc_message msg = {
9915 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9916 .rpc_argp = &data->args,
9917 .rpc_resp = &data->res,
9918 .rpc_cred = data->cred,
9919 };
9920 struct rpc_task_setup task_setup_data = {
9921 .task = &data->task,
9922 .rpc_client = NFS_CLIENT(data->args.inode),
9923 .rpc_message = &msg,
9924 .callback_ops = &nfs4_layoutcommit_ops,
9925 .callback_data = data,
9926 .flags = RPC_TASK_MOVEABLE,
9927 };
9928 struct rpc_task *task;
9929 int status = 0;
9930
9931 dprintk("NFS: initiating layoutcommit call. sync %d "
9932 "lbw: %llu inode %lu\n", sync,
9933 data->args.lastbytewritten,
9934 data->args.inode->i_ino);
9935
9936 if (!sync) {
9937 data->inode = nfs_igrab_and_active(data->args.inode);
9938 if (data->inode == NULL) {
9939 nfs4_layoutcommit_release(data);
9940 return -EAGAIN;
9941 }
9942 task_setup_data.flags = RPC_TASK_ASYNC;
9943 }
9944 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
9945 task = rpc_run_task(&task_setup_data);
9946 if (IS_ERR(task))
9947 return PTR_ERR(task);
9948 if (sync)
9949 status = task->tk_status;
9950 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9951 dprintk("%s: status %d\n", __func__, status);
9952 rpc_put_task(task);
9953 return status;
9954 }
9955
9956 /*
9957 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
9958 * possible) as per RFC3530bis and RFC5661 Security Considerations sections.
9959 */
9960 static int
9961 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9962 struct nfs_fsinfo *info,
9963 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
9964 {
9965 struct nfs41_secinfo_no_name_args args = {
9966 .style = SECINFO_STYLE_CURRENT_FH,
9967 };
9968 struct nfs4_secinfo_res res = {
9969 .flavors = flavors,
9970 };
9971 struct rpc_message msg = {
9972 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
9973 .rpc_argp = &args,
9974 .rpc_resp = &res,
9975 };
9976 struct nfs4_call_sync_data data = {
9977 .seq_server = server,
9978 .seq_args = &args.seq_args,
9979 .seq_res = &res.seq_res,
9980 };
9981 struct rpc_task_setup task_setup = {
9982 .rpc_client = server->client,
9983 .rpc_message = &msg,
9984 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
9985 .callback_data = &data,
9986 .flags = RPC_TASK_NO_ROUND_ROBIN,
9987 };
9988 const struct cred *cred = NULL;
9989 int status;
9990
9991 if (use_integrity) {
9992 task_setup.rpc_client = server->nfs_client->cl_rpcclient;
9993
9994 cred = nfs4_get_clid_cred(server->nfs_client);
9995 msg.rpc_cred = cred;
9996 }
9997
9998 dprintk("--> %s\n", __func__);
9999 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
10000 status = nfs4_call_sync_custom(&task_setup);
10001 dprintk("<-- %s status=%d\n", __func__, status);
10002
10003 put_cred(cred);
10004
10005 return status;
10006 }
10007
10008 static int
10009 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10010 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
10011 {
10012 struct nfs4_exception exception = {
10013 .interruptible = true,
10014 };
10015 int err;
10016 do {
10017 /* first try using integrity protection */
10018 err = -NFS4ERR_WRONGSEC;
10019
10020 /* try to use integrity protection with machine cred */
10021 if (_nfs4_is_integrity_protected(server->nfs_client))
10022 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10023 flavors, true);
10024
10025 /*
10026 * If unable to use integrity protection, or if SECINFO with
10027 * integrity protection returns NFS4ERR_WRONGSEC (which is
10028 * disallowed by the spec but exists in deployed servers), use
10029 * the current filesystem's rpc_client and the user cred.
10030 */
10031 if (err == -NFS4ERR_WRONGSEC)
10032 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10033 flavors, false);
10034
10035 switch (err) {
10036 case 0:
10037 case -NFS4ERR_WRONGSEC:
10038 case -ENOTSUPP:
10039 goto out;
10040 default:
10041 err = nfs4_handle_exception(server, err, &exception);
10042 }
10043 } while (exception.retry);
10044 out:
10045 return err;
10046 }
10047
10048 static int
10049 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
10050 struct nfs_fsinfo *info)
10051 {
10052 int err;
10053 struct page *page;
10054 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
10055 struct nfs4_secinfo_flavors *flavors;
10056 struct nfs4_secinfo4 *secinfo;
10057 int i;
10058
10059 page = alloc_page(GFP_KERNEL);
10060 if (!page) {
10061 err = -ENOMEM;
10062 goto out;
10063 }
10064
10065 flavors = page_address(page);
10066 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
10067
10068 /*
10069 * Fall back on "guess and check" method if
10070 * the server doesn't support SECINFO_NO_NAME
10071 */
10072 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
10073 err = nfs4_find_root_sec(server, fhandle, info);
10074 goto out_freepage;
10075 }
10076 if (err)
10077 goto out_freepage;
10078
10079 for (i = 0; i < flavors->num_flavors; i++) {
10080 secinfo = &flavors->flavors[i];
10081
10082 switch (secinfo->flavor) {
10083 case RPC_AUTH_NULL:
10084 case RPC_AUTH_UNIX:
10085 case RPC_AUTH_GSS:
10086 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
10087 &secinfo->flavor_info);
10088 break;
10089 default:
10090 flavor = RPC_AUTH_MAXFLAVOR;
10091 break;
10092 }
10093
10094 if (!nfs_auth_info_match(&server->auth_info, flavor))
10095 flavor = RPC_AUTH_MAXFLAVOR;
10096
10097 if (flavor != RPC_AUTH_MAXFLAVOR) {
10098 err = nfs4_lookup_root_sec(server, fhandle,
10099 info, flavor);
10100 if (!err)
10101 break;
10102 }
10103 }
10104
10105 if (flavor == RPC_AUTH_MAXFLAVOR)
10106 err = -EPERM;
10107
10108 out_freepage:
10109 put_page(page);
10110 if (err == -EACCES)
10111 return -EPERM;
10112 out:
10113 return err;
10114 }
10115
10116 static int _nfs41_test_stateid(struct nfs_server *server,
10117 nfs4_stateid *stateid,
10118 const struct cred *cred)
10119 {
10120 int status;
10121 struct nfs41_test_stateid_args args = {
10122 .stateid = stateid,
10123 };
10124 struct nfs41_test_stateid_res res;
10125 struct rpc_message msg = {
10126 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
10127 .rpc_argp = &args,
10128 .rpc_resp = &res,
10129 .rpc_cred = cred,
10130 };
10131 struct rpc_clnt *rpc_client = server->client;
10132
10133 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10134 &rpc_client, &msg);
10135
10136 dprintk("NFS call test_stateid %p\n", stateid);
10137 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
10138 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
10139 &args.seq_args, &res.seq_res);
10140 if (status != NFS_OK) {
10141 dprintk("NFS reply test_stateid: failed, %d\n", status);
10142 return status;
10143 }
10144 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
10145 return -res.status;
10146 }
10147
10148 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
10149 int err, struct nfs4_exception *exception)
10150 {
10151 exception->retry = 0;
10152 switch(err) {
10153 case -NFS4ERR_DELAY:
10154 case -NFS4ERR_RETRY_UNCACHED_REP:
10155 nfs4_handle_exception(server, err, exception);
10156 break;
10157 case -NFS4ERR_BADSESSION:
10158 case -NFS4ERR_BADSLOT:
10159 case -NFS4ERR_BAD_HIGH_SLOT:
10160 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10161 case -NFS4ERR_DEADSESSION:
10162 nfs4_do_handle_exception(server, err, exception);
10163 }
10164 }
10165
10166 /**
10167 * nfs41_test_stateid - perform a TEST_STATEID operation
10168 *
10169 * @server: server / transport on which to perform the operation
10170 * @stateid: state ID to test
10171 * @cred: credential
10172 *
10173 * Returns NFS_OK if the server recognizes that "stateid" is valid.
10174 * Otherwise a negative NFS4ERR value is returned if the operation
10175 * failed or the state ID is not currently valid.
10176 */
10177 static int nfs41_test_stateid(struct nfs_server *server,
10178 nfs4_stateid *stateid,
10179 const struct cred *cred)
10180 {
10181 struct nfs4_exception exception = {
10182 .interruptible = true,
10183 };
10184 int err;
10185 do {
10186 err = _nfs41_test_stateid(server, stateid, cred);
10187 nfs4_handle_delay_or_session_error(server, err, &exception);
10188 } while (exception.retry);
10189 return err;
10190 }
10191
10192 struct nfs_free_stateid_data {
10193 struct nfs_server *server;
10194 struct nfs41_free_stateid_args args;
10195 struct nfs41_free_stateid_res res;
10196 };
10197
10198 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10199 {
10200 struct nfs_free_stateid_data *data = calldata;
10201 nfs4_setup_sequence(data->server->nfs_client,
10202 &data->args.seq_args,
10203 &data->res.seq_res,
10204 task);
10205 }
10206
10207 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10208 {
10209 struct nfs_free_stateid_data *data = calldata;
10210
10211 nfs41_sequence_done(task, &data->res.seq_res);
10212
10213 switch (task->tk_status) {
10214 case -NFS4ERR_DELAY:
10215 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10216 rpc_restart_call_prepare(task);
10217 }
10218 }
10219
10220 static void nfs41_free_stateid_release(void *calldata)
10221 {
10222 kfree(calldata);
10223 }
10224
10225 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10226 .rpc_call_prepare = nfs41_free_stateid_prepare,
10227 .rpc_call_done = nfs41_free_stateid_done,
10228 .rpc_release = nfs41_free_stateid_release,
10229 };
10230
10231 /**
10232 * nfs41_free_stateid - perform a FREE_STATEID operation
10233 *
10234 * @server: server / transport on which to perform the operation
10235 * @stateid: state ID to release
10236 * @cred: credential
10237 * @privileged: set to true if this call needs to be privileged
10238 *
10239 * Note: this function is always asynchronous.
10240 */
10241 static int nfs41_free_stateid(struct nfs_server *server,
10242 const nfs4_stateid *stateid,
10243 const struct cred *cred,
10244 bool privileged)
10245 {
10246 struct rpc_message msg = {
10247 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10248 .rpc_cred = cred,
10249 };
10250 struct rpc_task_setup task_setup = {
10251 .rpc_client = server->client,
10252 .rpc_message = &msg,
10253 .callback_ops = &nfs41_free_stateid_ops,
10254 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
10255 };
10256 struct nfs_free_stateid_data *data;
10257 struct rpc_task *task;
10258
10259 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10260 &task_setup.rpc_client, &msg);
10261
10262 dprintk("NFS call free_stateid %p\n", stateid);
10263 data = kmalloc(sizeof(*data), GFP_NOFS);
10264 if (!data)
10265 return -ENOMEM;
10266 data->server = server;
10267 nfs4_stateid_copy(&data->args.stateid, stateid);
10268
10269 task_setup.callback_data = data;
10270
10271 msg.rpc_argp = &data->args;
10272 msg.rpc_resp = &data->res;
10273 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10274 task = rpc_run_task(&task_setup);
10275 if (IS_ERR(task))
10276 return PTR_ERR(task);
10277 rpc_put_task(task);
10278 return 0;
10279 }
10280
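/*
 * Illustrative note: TEST_STATEID and FREE_STATEID above back the
 * .test_and_free_expired hook (nfs41_test_and_free_expired_stateid) in
 * the minor-version ops below -- roughly, TEST_STATEID asks whether the
 * server still recognises a cached stateid, and FREE_STATEID is then
 * sent to discard it on the server once the client has decided the
 * stateid is no longer usable.
 */
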
10281 static void
10282 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10283 {
10284 const struct cred *cred = lsp->ls_state->owner->so_cred;
10285
10286 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10287 nfs4_free_lock_state(server, lsp);
10288 }
10289
10290 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10291 const nfs4_stateid *s2)
10292 {
10293 if (s1->type != s2->type)
10294 return false;
10295
10296 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10297 return false;
10298
10299 if (s1->seqid == s2->seqid)
10300 return true;
10301
10302 return s1->seqid == 0 || s2->seqid == 0;
10303 }
10304
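/*
 * Illustrative note: per nfs41_match_stateid() above, two NFSv4.1
 * stateids match when their type and "other" fields agree and either
 * the seqids are equal or one of them is zero -- seqid 0 acts as a
 * wildcard here, so a stateid cached with seqid 0 still matches after
 * the server has bumped the sequence number.
 */
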
10305 #endif /* CONFIG_NFS_V4_1 */
10306
10307 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10308 const nfs4_stateid *s2)
10309 {
10310 return nfs4_stateid_match(s1, s2);
10311 }
10312
10313
10314 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10315 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10316 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10317 .recover_open = nfs4_open_reclaim,
10318 .recover_lock = nfs4_lock_reclaim,
10319 .establish_clid = nfs4_init_clientid,
10320 .detect_trunking = nfs40_discover_server_trunking,
10321 };
10322
10323 #if defined(CONFIG_NFS_V4_1)
10324 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10325 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10326 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10327 .recover_open = nfs4_open_reclaim,
10328 .recover_lock = nfs4_lock_reclaim,
10329 .establish_clid = nfs41_init_clientid,
10330 .reclaim_complete = nfs41_proc_reclaim_complete,
10331 .detect_trunking = nfs41_discover_server_trunking,
10332 };
10333 #endif /* CONFIG_NFS_V4_1 */
10334
10335 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10336 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10337 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10338 .recover_open = nfs40_open_expired,
10339 .recover_lock = nfs4_lock_expired,
10340 .establish_clid = nfs4_init_clientid,
10341 };
10342
10343 #if defined(CONFIG_NFS_V4_1)
10344 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10345 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10346 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10347 .recover_open = nfs41_open_expired,
10348 .recover_lock = nfs41_lock_expired,
10349 .establish_clid = nfs41_init_clientid,
10350 };
10351 #endif /* CONFIG_NFS_V4_1 */
10352
10353 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10354 .sched_state_renewal = nfs4_proc_async_renew,
10355 .get_state_renewal_cred = nfs4_get_renew_cred,
10356 .renew_lease = nfs4_proc_renew,
10357 };
10358
10359 #if defined(CONFIG_NFS_V4_1)
10360 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10361 .sched_state_renewal = nfs41_proc_async_sequence,
10362 .get_state_renewal_cred = nfs4_get_machine_cred,
10363 .renew_lease = nfs4_proc_sequence,
10364 };
10365 #endif
10366
10367 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10368 .get_locations = _nfs40_proc_get_locations,
10369 .fsid_present = _nfs40_proc_fsid_present,
10370 };
10371
10372 #if defined(CONFIG_NFS_V4_1)
10373 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10374 .get_locations = _nfs41_proc_get_locations,
10375 .fsid_present = _nfs41_proc_fsid_present,
10376 };
10377 #endif /* CONFIG_NFS_V4_1 */
10378
10379 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10380 .minor_version = 0,
10381 .init_caps = NFS_CAP_READDIRPLUS
10382 | NFS_CAP_ATOMIC_OPEN
10383 | NFS_CAP_POSIX_LOCK,
10384 .init_client = nfs40_init_client,
10385 .shutdown_client = nfs40_shutdown_client,
10386 .match_stateid = nfs4_match_stateid,
10387 .find_root_sec = nfs4_find_root_sec,
10388 .free_lock_state = nfs4_release_lockowner,
10389 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
10390 .alloc_seqid = nfs_alloc_seqid,
10391 .call_sync_ops = &nfs40_call_sync_ops,
10392 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10393 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10394 .state_renewal_ops = &nfs40_state_renewal_ops,
10395 .mig_recovery_ops = &nfs40_mig_recovery_ops,
10396 };
10397
10398 #if defined(CONFIG_NFS_V4_1)
10399 static struct nfs_seqid *
10400 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10401 {
10402 return NULL;
10403 }
10404
10405 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10406 .minor_version = 1,
10407 .init_caps = NFS_CAP_READDIRPLUS
10408 | NFS_CAP_ATOMIC_OPEN
10409 | NFS_CAP_POSIX_LOCK
10410 | NFS_CAP_STATEID_NFSV41
10411 | NFS_CAP_ATOMIC_OPEN_V1
10412 | NFS_CAP_LGOPEN,
10413 .init_client = nfs41_init_client,
10414 .shutdown_client = nfs41_shutdown_client,
10415 .match_stateid = nfs41_match_stateid,
10416 .find_root_sec = nfs41_find_root_sec,
10417 .free_lock_state = nfs41_free_lock_state,
10418 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10419 .alloc_seqid = nfs_alloc_no_seqid,
10420 .session_trunk = nfs4_test_session_trunk,
10421 .call_sync_ops = &nfs41_call_sync_ops,
10422 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10423 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10424 .state_renewal_ops = &nfs41_state_renewal_ops,
10425 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10426 };
10427 #endif
10428
10429 #if defined(CONFIG_NFS_V4_2)
10430 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10431 .minor_version = 2,
10432 .init_caps = NFS_CAP_READDIRPLUS
10433 | NFS_CAP_ATOMIC_OPEN
10434 | NFS_CAP_POSIX_LOCK
10435 | NFS_CAP_STATEID_NFSV41
10436 | NFS_CAP_ATOMIC_OPEN_V1
10437 | NFS_CAP_LGOPEN
10438 | NFS_CAP_ALLOCATE
10439 | NFS_CAP_COPY
10440 | NFS_CAP_OFFLOAD_CANCEL
10441 | NFS_CAP_COPY_NOTIFY
10442 | NFS_CAP_DEALLOCATE
10443 | NFS_CAP_SEEK
10444 | NFS_CAP_LAYOUTSTATS
10445 | NFS_CAP_CLONE
10446 | NFS_CAP_LAYOUTERROR
10447 | NFS_CAP_READ_PLUS,
10448 .init_client = nfs41_init_client,
10449 .shutdown_client = nfs41_shutdown_client,
10450 .match_stateid = nfs41_match_stateid,
10451 .find_root_sec = nfs41_find_root_sec,
10452 .free_lock_state = nfs41_free_lock_state,
10453 .call_sync_ops = &nfs41_call_sync_ops,
10454 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10455 .alloc_seqid = nfs_alloc_no_seqid,
10456 .session_trunk = nfs4_test_session_trunk,
10457 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10458 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10459 .state_renewal_ops = &nfs41_state_renewal_ops,
10460 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10461 };
10462 #endif
10463
10464 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10465 [0] = &nfs_v4_0_minor_ops,
10466 #if defined(CONFIG_NFS_V4_1)
10467 [1] = &nfs_v4_1_minor_ops,
10468 #endif
10469 #if defined(CONFIG_NFS_V4_2)
10470 [2] = &nfs_v4_2_minor_ops,
10471 #endif
10472 };
10473
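/*
 * Illustrative note: nfs_v4_minor_ops[] is indexed by the negotiated
 * NFSv4 minor version, so (on a kernel with the relevant config options
 * enabled) a "-o vers=4.2" mount ends up on nfs_v4_2_minor_ops while a
 * plain "-o vers=4.0" mount uses nfs_v4_0_minor_ops.
 */
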
10474 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10475 {
10476 ssize_t error, error2, error3;
10477
10478 error = generic_listxattr(dentry, list, size);
10479 if (error < 0)
10480 return error;
10481 if (list) {
10482 list += error;
10483 size -= error;
10484 }
10485
10486 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10487 if (error2 < 0)
10488 return error2;
10489
10490 if (list) {
10491 list += error2;
10492 size -= error2;
10493 }
10494
10495 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
10496 if (error3 < 0)
10497 return error3;
10498
10499 return error + error2 + error3;
10500 }
10501
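/*
 * Illustrative note: nfs4_listxattr() above simply concatenates three
 * listings into the caller's buffer -- the names reported by the generic
 * handlers (e.g. "system.nfs4_acl"), the security label name when
 * CONFIG_NFS_V4_SECURITY_LABEL is enabled, and any "user." attributes
 * known on NFSv4.2.  A hedged sketch of a resulting buffer with all
 * three present:
 *
 *	"system.nfs4_acl\0security.selinux\0user.myattr\0"
 */
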
10502 static const struct inode_operations nfs4_dir_inode_operations = {
10503 .create = nfs_create,
10504 .lookup = nfs_lookup,
10505 .atomic_open = nfs_atomic_open,
10506 .link = nfs_link,
10507 .unlink = nfs_unlink,
10508 .symlink = nfs_symlink,
10509 .mkdir = nfs_mkdir,
10510 .rmdir = nfs_rmdir,
10511 .mknod = nfs_mknod,
10512 .rename = nfs_rename,
10513 .permission = nfs_permission,
10514 .getattr = nfs_getattr,
10515 .setattr = nfs_setattr,
10516 .listxattr = nfs4_listxattr,
10517 };
10518
10519 static const struct inode_operations nfs4_file_inode_operations = {
10520 .permission = nfs_permission,
10521 .getattr = nfs_getattr,
10522 .setattr = nfs_setattr,
10523 .listxattr = nfs4_listxattr,
10524 };
10525
10526 const struct nfs_rpc_ops nfs_v4_clientops = {
10527 .version = 4, /* protocol version */
10528 .dentry_ops = &nfs4_dentry_operations,
10529 .dir_inode_ops = &nfs4_dir_inode_operations,
10530 .file_inode_ops = &nfs4_file_inode_operations,
10531 .file_ops = &nfs4_file_operations,
10532 .getroot = nfs4_proc_get_root,
10533 .submount = nfs4_submount,
10534 .try_get_tree = nfs4_try_get_tree,
10535 .getattr = nfs4_proc_getattr,
10536 .setattr = nfs4_proc_setattr,
10537 .lookup = nfs4_proc_lookup,
10538 .lookupp = nfs4_proc_lookupp,
10539 .access = nfs4_proc_access,
10540 .readlink = nfs4_proc_readlink,
10541 .create = nfs4_proc_create,
10542 .remove = nfs4_proc_remove,
10543 .unlink_setup = nfs4_proc_unlink_setup,
10544 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10545 .unlink_done = nfs4_proc_unlink_done,
10546 .rename_setup = nfs4_proc_rename_setup,
10547 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10548 .rename_done = nfs4_proc_rename_done,
10549 .link = nfs4_proc_link,
10550 .symlink = nfs4_proc_symlink,
10551 .mkdir = nfs4_proc_mkdir,
10552 .rmdir = nfs4_proc_rmdir,
10553 .readdir = nfs4_proc_readdir,
10554 .mknod = nfs4_proc_mknod,
10555 .statfs = nfs4_proc_statfs,
10556 .fsinfo = nfs4_proc_fsinfo,
10557 .pathconf = nfs4_proc_pathconf,
10558 .set_capabilities = nfs4_server_capabilities,
10559 .decode_dirent = nfs4_decode_dirent,
10560 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10561 .read_setup = nfs4_proc_read_setup,
10562 .read_done = nfs4_read_done,
10563 .write_setup = nfs4_proc_write_setup,
10564 .write_done = nfs4_write_done,
10565 .commit_setup = nfs4_proc_commit_setup,
10566 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10567 .commit_done = nfs4_commit_done,
10568 .lock = nfs4_proc_lock,
10569 .clear_acl_cache = nfs4_zap_acl_attr,
10570 .close_context = nfs4_close_context,
10571 .open_context = nfs4_atomic_open,
10572 .have_delegation = nfs4_have_delegation,
10573 .alloc_client = nfs4_alloc_client,
10574 .init_client = nfs4_init_client,
10575 .free_client = nfs4_free_client,
10576 .create_server = nfs4_create_server,
10577 .clone_server = nfs_clone_server,
10578 .discover_trunking = nfs4_discover_trunking,
10579 };
10580
10581 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10582 .name = XATTR_NAME_NFSV4_ACL,
10583 .list = nfs4_xattr_list_nfs4_acl,
10584 .get = nfs4_xattr_get_nfs4_acl,
10585 .set = nfs4_xattr_set_nfs4_acl,
10586 };
10587
10588 #ifdef CONFIG_NFS_V4_2
10589 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10590 .prefix = XATTR_USER_PREFIX,
10591 .get = nfs4_xattr_get_nfs4_user,
10592 .set = nfs4_xattr_set_nfs4_user,
10593 };
10594 #endif
10595
10596 const struct xattr_handler *nfs4_xattr_handlers[] = {
10597 &nfs4_xattr_nfs4_acl_handler,
10598 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10599 &nfs4_xattr_nfs4_label_handler,
10600 #endif
10601 #ifdef CONFIG_NFS_V4_2
10602 &nfs4_xattr_nfs4_user_handler,
10603 #endif
10604 NULL
10605 };