1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state, struct nfs4_label *ilabel,
87 struct nfs4_label *olabel);
88 #ifdef CONFIG_NFS_V4_1
89 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
90 struct rpc_cred *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
92 struct rpc_cred *);
93 #endif
94
95 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
96 static inline struct nfs4_label *
97 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
98 struct iattr *sattr, struct nfs4_label *label)
99 {
100 int err;
101
102 if (label == NULL)
103 return NULL;
104
105 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
106 return NULL;
107
108 err = security_dentry_init_security(dentry, sattr->ia_mode,
109 &dentry->d_name, (void **)&label->label, &label->len);
110 if (err == 0)
111 return label;
112
113 return NULL;
114 }
115 static inline void
116 nfs4_label_release_security(struct nfs4_label *label)
117 {
118 if (label)
119 security_release_secctx(label->label, label->len);
120 }
121 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
122 {
123 if (label)
124 return server->attr_bitmask;
125
126 return server->attr_bitmask_nl;
127 }
128 #else
129 static inline struct nfs4_label *
130 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
131 struct iattr *sattr, struct nfs4_label *l)
132 { return NULL; }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 { return; }
136 static inline u32 *
137 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 { return server->attr_bitmask; }
139 #endif
140
141 /* Prevent leaks of NFSv4 errors into userland */
142 static int nfs4_map_errors(int err)
143 {
144 if (err >= -1000)
145 return err;
146 switch (err) {
147 case -NFS4ERR_RESOURCE:
148 case -NFS4ERR_LAYOUTTRYLATER:
149 case -NFS4ERR_RECALLCONFLICT:
150 return -EREMOTEIO;
151 case -NFS4ERR_WRONGSEC:
152 case -NFS4ERR_WRONG_CRED:
153 return -EPERM;
154 case -NFS4ERR_BADOWNER:
155 case -NFS4ERR_BADNAME:
156 return -EINVAL;
157 case -NFS4ERR_SHARE_DENIED:
158 return -EACCES;
159 case -NFS4ERR_MINOR_VERS_MISMATCH:
160 return -EPROTONOSUPPORT;
161 case -NFS4ERR_FILE_OPEN:
162 return -EBUSY;
163 default:
164 dprintk("%s could not handle NFSv4 error %d\n",
165 __func__, -err);
166 break;
167 }
168 return -EIO;
169 }
170
171 /*
172 * This is our standard bitmap for GETATTR requests.
173 */
174 const u32 nfs4_fattr_bitmap[3] = {
175 FATTR4_WORD0_TYPE
176 | FATTR4_WORD0_CHANGE
177 | FATTR4_WORD0_SIZE
178 | FATTR4_WORD0_FSID
179 | FATTR4_WORD0_FILEID,
180 FATTR4_WORD1_MODE
181 | FATTR4_WORD1_NUMLINKS
182 | FATTR4_WORD1_OWNER
183 | FATTR4_WORD1_OWNER_GROUP
184 | FATTR4_WORD1_RAWDEV
185 | FATTR4_WORD1_SPACE_USED
186 | FATTR4_WORD1_TIME_ACCESS
187 | FATTR4_WORD1_TIME_METADATA
188 | FATTR4_WORD1_TIME_MODIFY
189 | FATTR4_WORD1_MOUNTED_ON_FILEID,
190 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
191 FATTR4_WORD2_SECURITY_LABEL
192 #endif
193 };
194
195 static const u32 nfs4_pnfs_open_bitmap[3] = {
196 FATTR4_WORD0_TYPE
197 | FATTR4_WORD0_CHANGE
198 | FATTR4_WORD0_SIZE
199 | FATTR4_WORD0_FSID
200 | FATTR4_WORD0_FILEID,
201 FATTR4_WORD1_MODE
202 | FATTR4_WORD1_NUMLINKS
203 | FATTR4_WORD1_OWNER
204 | FATTR4_WORD1_OWNER_GROUP
205 | FATTR4_WORD1_RAWDEV
206 | FATTR4_WORD1_SPACE_USED
207 | FATTR4_WORD1_TIME_ACCESS
208 | FATTR4_WORD1_TIME_METADATA
209 | FATTR4_WORD1_TIME_MODIFY,
210 FATTR4_WORD2_MDSTHRESHOLD
211 };
212
213 static const u32 nfs4_open_noattr_bitmap[3] = {
214 FATTR4_WORD0_TYPE
215 | FATTR4_WORD0_CHANGE
216 | FATTR4_WORD0_FILEID,
217 };
218
219 const u32 nfs4_statfs_bitmap[3] = {
220 FATTR4_WORD0_FILES_AVAIL
221 | FATTR4_WORD0_FILES_FREE
222 | FATTR4_WORD0_FILES_TOTAL,
223 FATTR4_WORD1_SPACE_AVAIL
224 | FATTR4_WORD1_SPACE_FREE
225 | FATTR4_WORD1_SPACE_TOTAL
226 };
227
228 const u32 nfs4_pathconf_bitmap[3] = {
229 FATTR4_WORD0_MAXLINK
230 | FATTR4_WORD0_MAXNAME,
231 0
232 };
233
234 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
235 | FATTR4_WORD0_MAXREAD
236 | FATTR4_WORD0_MAXWRITE
237 | FATTR4_WORD0_LEASE_TIME,
238 FATTR4_WORD1_TIME_DELTA
239 | FATTR4_WORD1_FS_LAYOUT_TYPES,
240 FATTR4_WORD2_LAYOUT_BLKSIZE
241 | FATTR4_WORD2_CLONE_BLKSIZE
242 };
243
244 const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261 };
262
263 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265 {
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315 }
316
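/*
 * Compute the retry delay for the current attempt: clamp *timeout to the
 * NFS4_POLL_RETRY_MIN..NFS4_POLL_RETRY_MAX range, return it, and double
 * the stored value so successive retries back off exponentially.
 */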
317 static long nfs4_update_delay(long *timeout)
318 {
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329 }
330
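/*
 * Sleep for the interval computed by nfs4_update_delay().  The sleep is
 * freezable and killable; a fatal signal ends the wait early and the
 * function returns -ERESTARTSYS so the operation can be restarted.
 */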
331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332 {
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342 }
343
344 /* Common NFSv4 exception handling for both the sync and async paths:
345 * classify the error and set the delay/recovering/retry flags for the caller.
346 */
347 static int nfs4_do_handle_exception(struct nfs_server *server,
348 int errorcode, struct nfs4_exception *exception)
349 {
350 struct nfs_client *clp = server->nfs_client;
351 struct nfs4_state *state = exception->state;
352 struct inode *inode = exception->inode;
353 int ret = errorcode;
354
355 exception->delay = 0;
356 exception->recovering = 0;
357 exception->retry = 0;
358 switch(errorcode) {
359 case 0:
360 return 0;
361 case -NFS4ERR_OPENMODE:
362 case -NFS4ERR_DELEG_REVOKED:
363 case -NFS4ERR_ADMIN_REVOKED:
364 case -NFS4ERR_BAD_STATEID:
365 if (inode && nfs_async_inode_return_delegation(inode,
366 NULL) == 0)
367 goto wait_on_recovery;
368 if (state == NULL)
369 break;
370 ret = nfs4_schedule_stateid_recovery(server, state);
371 if (ret < 0)
372 break;
373 goto wait_on_recovery;
374 case -NFS4ERR_EXPIRED:
375 if (state != NULL) {
376 ret = nfs4_schedule_stateid_recovery(server, state);
377 if (ret < 0)
378 break;
379 }
380 case -NFS4ERR_STALE_STATEID:
381 case -NFS4ERR_STALE_CLIENTID:
382 nfs4_schedule_lease_recovery(clp);
383 goto wait_on_recovery;
384 case -NFS4ERR_MOVED:
385 ret = nfs4_schedule_migration_recovery(server);
386 if (ret < 0)
387 break;
388 goto wait_on_recovery;
389 case -NFS4ERR_LEASE_MOVED:
390 nfs4_schedule_lease_moved_recovery(clp);
391 goto wait_on_recovery;
392 #if defined(CONFIG_NFS_V4_1)
393 case -NFS4ERR_BADSESSION:
394 case -NFS4ERR_BADSLOT:
395 case -NFS4ERR_BAD_HIGH_SLOT:
396 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
397 case -NFS4ERR_DEADSESSION:
398 case -NFS4ERR_SEQ_FALSE_RETRY:
399 case -NFS4ERR_SEQ_MISORDERED:
400 dprintk("%s ERROR: %d Reset session\n", __func__,
401 errorcode);
402 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
403 goto wait_on_recovery;
404 #endif /* defined(CONFIG_NFS_V4_1) */
405 case -NFS4ERR_FILE_OPEN:
406 if (exception->timeout > HZ) {
407 /* We have retried a decent amount, time to
408 * fail
409 */
410 ret = -EBUSY;
411 break;
412 }
413 case -NFS4ERR_DELAY:
414 nfs_inc_server_stats(server, NFSIOS_DELAY);
415 case -NFS4ERR_GRACE:
416 exception->delay = 1;
417 return 0;
418
419 case -NFS4ERR_RETRY_UNCACHED_REP:
420 case -NFS4ERR_OLD_STATEID:
421 exception->retry = 1;
422 break;
423 case -NFS4ERR_BADOWNER:
424 /* The following works around a Linux server bug! */
425 case -NFS4ERR_BADNAME:
426 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
427 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
428 exception->retry = 1;
429 printk(KERN_WARNING "NFS: v4 server %s "
430 "does not accept raw "
431 "uid/gids. "
432 "Reenabling the idmapper.\n",
433 server->nfs_client->cl_hostname);
434 }
435 }
436 /* We failed to handle the error */
437 return nfs4_map_errors(ret);
438 wait_on_recovery:
439 exception->recovering = 1;
440 return 0;
441 }
442
443 /* This is the error handling routine for processes that are allowed
444 * to sleep.
445 */
446 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
447 {
448 struct nfs_client *clp = server->nfs_client;
449 int ret;
450
451 ret = nfs4_do_handle_exception(server, errorcode, exception);
452 if (exception->delay) {
453 ret = nfs4_delay(server->client, &exception->timeout);
454 goto out_retry;
455 }
456 if (exception->recovering) {
457 ret = nfs4_wait_clnt_recover(clp);
458 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
459 return -EIO;
460 goto out_retry;
461 }
462 return ret;
463 out_retry:
464 if (ret == 0)
465 exception->retry = 1;
466 return ret;
467 }
468
469 static int
470 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
471 int errorcode, struct nfs4_exception *exception)
472 {
473 struct nfs_client *clp = server->nfs_client;
474 int ret;
475
476 ret = nfs4_do_handle_exception(server, errorcode, exception);
477 if (exception->delay) {
478 rpc_delay(task, nfs4_update_delay(&exception->timeout));
479 goto out_retry;
480 }
481 if (exception->recovering) {
482 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
483 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
484 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
485 goto out_retry;
486 }
487 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
488 ret = -EIO;
489 return ret;
490 out_retry:
491 if (ret == 0)
492 exception->retry = 1;
493 return ret;
494 }
495
496 static int
497 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
498 struct nfs4_state *state, long *timeout)
499 {
500 struct nfs4_exception exception = {
501 .state = state,
502 };
503
504 if (task->tk_status >= 0)
505 return 0;
506 if (timeout)
507 exception.timeout = *timeout;
508 task->tk_status = nfs4_async_handle_exception(task, server,
509 task->tk_status,
510 &exception);
511 if (exception.delay && timeout)
512 *timeout = exception.timeout;
513 if (exception.retry)
514 return -EAGAIN;
515 return 0;
516 }
517
518 /*
519 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
520 * or 'false' otherwise.
521 */
522 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
523 {
524 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
525
526 if (flavor == RPC_AUTH_GSS_KRB5I ||
527 flavor == RPC_AUTH_GSS_KRB5P)
528 return true;
529
530 return false;
531 }
532
533 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
534 {
535 spin_lock(&clp->cl_lock);
536 if (time_before(clp->cl_last_renewal,timestamp))
537 clp->cl_last_renewal = timestamp;
538 spin_unlock(&clp->cl_lock);
539 }
540
541 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
542 {
543 struct nfs_client *clp = server->nfs_client;
544
545 if (!nfs4_has_session(clp))
546 do_renew_lease(clp, timestamp);
547 }
548
549 struct nfs4_call_sync_data {
550 const struct nfs_server *seq_server;
551 struct nfs4_sequence_args *seq_args;
552 struct nfs4_sequence_res *seq_res;
553 };
554
555 void nfs4_init_sequence(struct nfs4_sequence_args *args,
556 struct nfs4_sequence_res *res, int cache_reply)
557 {
558 args->sa_slot = NULL;
559 args->sa_cache_this = cache_reply;
560 args->sa_privileged = 0;
561
562 res->sr_slot = NULL;
563 }
564
565 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
566 {
567 args->sa_privileged = 1;
568 }
569
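/*
 * NFSv4.0 call sequencing: allocate a slot from the given slot table
 * before the RPC call is started.  If the table is draining (and the
 * task is not privileged) or no slot is free, the task is put to sleep
 * on the slot table wait queue and -EAGAIN is returned.
 */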
570 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
571 struct nfs4_sequence_args *args,
572 struct nfs4_sequence_res *res,
573 struct rpc_task *task)
574 {
575 struct nfs4_slot *slot;
576
577 /* slot already allocated? */
578 if (res->sr_slot != NULL)
579 goto out_start;
580
581 spin_lock(&tbl->slot_tbl_lock);
582 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
583 goto out_sleep;
584
585 slot = nfs4_alloc_slot(tbl);
586 if (IS_ERR(slot)) {
587 if (slot == ERR_PTR(-ENOMEM))
588 task->tk_timeout = HZ >> 2;
589 goto out_sleep;
590 }
591 spin_unlock(&tbl->slot_tbl_lock);
592
593 args->sa_slot = slot;
594 res->sr_slot = slot;
595
596 out_start:
597 rpc_call_start(task);
598 return 0;
599
600 out_sleep:
601 if (args->sa_privileged)
602 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
603 NULL, RPC_PRIORITY_PRIVILEGED);
604 else
605 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
606 spin_unlock(&tbl->slot_tbl_lock);
607 return -EAGAIN;
608 }
609 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
610
611 static int nfs40_sequence_done(struct rpc_task *task,
612 struct nfs4_sequence_res *res)
613 {
614 struct nfs4_slot *slot = res->sr_slot;
615 struct nfs4_slot_table *tbl;
616
617 if (slot == NULL)
618 goto out;
619
620 tbl = slot->table;
621 spin_lock(&tbl->slot_tbl_lock);
622 if (!nfs41_wake_and_assign_slot(tbl, slot))
623 nfs4_free_slot(tbl, slot);
624 spin_unlock(&tbl->slot_tbl_lock);
625
626 res->sr_slot = NULL;
627 out:
628 return 1;
629 }
630
631 #if defined(CONFIG_NFS_V4_1)
632
633 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
634 {
635 struct nfs4_session *session;
636 struct nfs4_slot_table *tbl;
637 struct nfs4_slot *slot = res->sr_slot;
638 bool send_new_highest_used_slotid = false;
639
640 tbl = slot->table;
641 session = tbl->session;
642
643 spin_lock(&tbl->slot_tbl_lock);
644 /* Be nice to the server: try to ensure that the last transmitted
645 * value for highest_used_slotid <= target_highest_slotid
646 */
647 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
648 send_new_highest_used_slotid = true;
649
650 if (nfs41_wake_and_assign_slot(tbl, slot)) {
651 send_new_highest_used_slotid = false;
652 goto out_unlock;
653 }
654 nfs4_free_slot(tbl, slot);
655
656 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
657 send_new_highest_used_slotid = false;
658 out_unlock:
659 spin_unlock(&tbl->slot_tbl_lock);
660 res->sr_slot = NULL;
661 if (send_new_highest_used_slotid)
662 nfs41_notify_server(session->clp);
663 }
664
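/*
 * Process the result of the SEQUENCE operation that leads every NFSv4.1
 * compound: on success, bump the slot sequence number, renew the lease
 * and handle the sequence status flags; on slot-related errors such as
 * NFS4ERR_DELAY, NFS4ERR_BADSLOT or NFS4ERR_SEQ_MISORDERED, adjust the
 * slot state and restart the RPC call.
 */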
665 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
666 {
667 struct nfs4_session *session;
668 struct nfs4_slot *slot = res->sr_slot;
669 struct nfs_client *clp;
670 bool interrupted = false;
671 int ret = 1;
672
673 if (slot == NULL)
674 goto out_noaction;
675 /* don't increment the sequence number if the task wasn't sent */
676 if (!RPC_WAS_SENT(task))
677 goto out;
678
679 session = slot->table->session;
680
681 if (slot->interrupted) {
682 slot->interrupted = 0;
683 interrupted = true;
684 }
685
686 trace_nfs4_sequence_done(session, res);
687 /* Check the SEQUENCE operation status */
688 switch (res->sr_status) {
689 case 0:
690 /* Update the slot's sequence and clientid lease timer */
691 ++slot->seq_nr;
692 clp = session->clp;
693 do_renew_lease(clp, res->sr_timestamp);
694 /* Check sequence flags */
695 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
696 nfs41_update_target_slotid(slot->table, slot, res);
697 break;
698 case 1:
699 /*
700 * sr_status remains 1 if an RPC level error occurred.
701 * The server may or may not have processed the sequence
702 * operation.
703 * Mark the slot as having hosted an interrupted RPC call.
704 */
705 slot->interrupted = 1;
706 goto out;
707 case -NFS4ERR_DELAY:
708 /* The server detected a resend of the RPC call and
709 * returned NFS4ERR_DELAY as per Section 2.10.6.2
710 * of RFC5661.
711 */
712 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
713 __func__,
714 slot->slot_nr,
715 slot->seq_nr);
716 goto out_retry;
717 case -NFS4ERR_BADSLOT:
718 /*
719 * The slot id we used was probably retired. Try again
720 * using a different slot id.
721 */
722 goto retry_nowait;
723 case -NFS4ERR_SEQ_MISORDERED:
724 /*
725 * Was the last operation on this sequence interrupted?
726 * If so, retry after bumping the sequence number.
727 */
728 if (interrupted) {
729 ++slot->seq_nr;
730 goto retry_nowait;
731 }
732 /*
733 * Could this slot have been previously retired?
734 * If so, then the server may be expecting seq_nr = 1!
735 */
736 if (slot->seq_nr != 1) {
737 slot->seq_nr = 1;
738 goto retry_nowait;
739 }
740 break;
741 case -NFS4ERR_SEQ_FALSE_RETRY:
742 ++slot->seq_nr;
743 goto retry_nowait;
744 default:
745 /* Just update the slot sequence no. */
746 ++slot->seq_nr;
747 }
748 out:
749 /* The session may be reset by one of the error handlers. */
750 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
751 nfs41_sequence_free_slot(res);
752 out_noaction:
753 return ret;
754 retry_nowait:
755 if (rpc_restart_call_prepare(task)) {
756 task->tk_status = 0;
757 ret = 0;
758 }
759 goto out;
760 out_retry:
761 if (!rpc_restart_call(task))
762 goto out;
763 rpc_delay(task, NFS4_POLL_RETRY_MAX);
764 return 0;
765 }
766 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
767
768 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
769 {
770 if (res->sr_slot == NULL)
771 return 1;
772 if (!res->sr_slot->table->session)
773 return nfs40_sequence_done(task, res);
774 return nfs41_sequence_done(task, res);
775 }
776 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
777
778 int nfs41_setup_sequence(struct nfs4_session *session,
779 struct nfs4_sequence_args *args,
780 struct nfs4_sequence_res *res,
781 struct rpc_task *task)
782 {
783 struct nfs4_slot *slot;
784 struct nfs4_slot_table *tbl;
785
786 dprintk("--> %s\n", __func__);
787 /* slot already allocated? */
788 if (res->sr_slot != NULL)
789 goto out_success;
790
791 tbl = &session->fc_slot_table;
792
793 task->tk_timeout = 0;
794
795 spin_lock(&tbl->slot_tbl_lock);
796 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
797 !args->sa_privileged) {
798 /* The state manager will wait until the slot table is empty */
799 dprintk("%s session is draining\n", __func__);
800 goto out_sleep;
801 }
802
803 slot = nfs4_alloc_slot(tbl);
804 if (IS_ERR(slot)) {
805 /* If out of memory, try again in 1/4 second */
806 if (slot == ERR_PTR(-ENOMEM))
807 task->tk_timeout = HZ >> 2;
808 dprintk("<-- %s: no free slots\n", __func__);
809 goto out_sleep;
810 }
811 spin_unlock(&tbl->slot_tbl_lock);
812
813 args->sa_slot = slot;
814
815 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
816 slot->slot_nr, slot->seq_nr);
817
818 res->sr_slot = slot;
819 res->sr_timestamp = jiffies;
820 res->sr_status_flags = 0;
821 /*
822 * sr_status is only set in decode_sequence, and so will remain
823 * set to 1 if an rpc level failure occurs.
824 */
825 res->sr_status = 1;
826 trace_nfs4_setup_sequence(session, args);
827 out_success:
828 rpc_call_start(task);
829 return 0;
830 out_sleep:
831 /* Privileged tasks are queued with top priority */
832 if (args->sa_privileged)
833 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
834 NULL, RPC_PRIORITY_PRIVILEGED);
835 else
836 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
837 spin_unlock(&tbl->slot_tbl_lock);
838 return -EAGAIN;
839 }
840 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
841
842 static int nfs4_setup_sequence(const struct nfs_server *server,
843 struct nfs4_sequence_args *args,
844 struct nfs4_sequence_res *res,
845 struct rpc_task *task)
846 {
847 struct nfs4_session *session = nfs4_get_session(server);
848 int ret = 0;
849
850 if (!session)
851 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
852 args, res, task);
853
854 dprintk("--> %s clp %p session %p sr_slot %u\n",
855 __func__, session->clp, session, res->sr_slot ?
856 res->sr_slot->slot_nr : NFS4_NO_SLOT);
857
858 ret = nfs41_setup_sequence(session, args, res, task);
859
860 dprintk("<-- %s status=%d\n", __func__, ret);
861 return ret;
862 }
863
864 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
865 {
866 struct nfs4_call_sync_data *data = calldata;
867 struct nfs4_session *session = nfs4_get_session(data->seq_server);
868
869 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
870
871 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
872 }
873
874 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
875 {
876 struct nfs4_call_sync_data *data = calldata;
877
878 nfs41_sequence_done(task, data->seq_res);
879 }
880
881 static const struct rpc_call_ops nfs41_call_sync_ops = {
882 .rpc_call_prepare = nfs41_call_sync_prepare,
883 .rpc_call_done = nfs41_call_sync_done,
884 };
885
886 #else /* !CONFIG_NFS_V4_1 */
887
888 static int nfs4_setup_sequence(const struct nfs_server *server,
889 struct nfs4_sequence_args *args,
890 struct nfs4_sequence_res *res,
891 struct rpc_task *task)
892 {
893 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
894 args, res, task);
895 }
896
897 int nfs4_sequence_done(struct rpc_task *task,
898 struct nfs4_sequence_res *res)
899 {
900 return nfs40_sequence_done(task, res);
901 }
902 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
903
904 #endif /* !CONFIG_NFS_V4_1 */
905
906 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
907 {
908 struct nfs4_call_sync_data *data = calldata;
909 nfs4_setup_sequence(data->seq_server,
910 data->seq_args, data->seq_res, task);
911 }
912
913 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
914 {
915 struct nfs4_call_sync_data *data = calldata;
916 nfs4_sequence_done(task, data->seq_res);
917 }
918
919 static const struct rpc_call_ops nfs40_call_sync_ops = {
920 .rpc_call_prepare = nfs40_call_sync_prepare,
921 .rpc_call_done = nfs40_call_sync_done,
922 };
923
924 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
925 struct nfs_server *server,
926 struct rpc_message *msg,
927 struct nfs4_sequence_args *args,
928 struct nfs4_sequence_res *res)
929 {
930 int ret;
931 struct rpc_task *task;
932 struct nfs_client *clp = server->nfs_client;
933 struct nfs4_call_sync_data data = {
934 .seq_server = server,
935 .seq_args = args,
936 .seq_res = res,
937 };
938 struct rpc_task_setup task_setup = {
939 .rpc_client = clnt,
940 .rpc_message = msg,
941 .callback_ops = clp->cl_mvops->call_sync_ops,
942 .callback_data = &data
943 };
944
945 task = rpc_run_task(&task_setup);
946 if (IS_ERR(task))
947 ret = PTR_ERR(task);
948 else {
949 ret = task->tk_status;
950 rpc_put_task(task);
951 }
952 return ret;
953 }
954
955 int nfs4_call_sync(struct rpc_clnt *clnt,
956 struct nfs_server *server,
957 struct rpc_message *msg,
958 struct nfs4_sequence_args *args,
959 struct nfs4_sequence_res *res,
960 int cache_reply)
961 {
962 nfs4_init_sequence(args, res, cache_reply);
963 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
964 }
965
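/*
 * A directory-modifying operation succeeded: invalidate the cached
 * attributes and readdir data for the directory and record the new
 * change attribute.  If the change was not atomic, or another change
 * slipped in (cinfo->before no longer matches i_version), force dentry
 * revalidation as well.
 */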
966 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
967 {
968 struct nfs_inode *nfsi = NFS_I(dir);
969
970 spin_lock(&dir->i_lock);
971 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
972 if (!cinfo->atomic || cinfo->before != dir->i_version)
973 nfs_force_lookup_revalidate(dir);
974 dir->i_version = cinfo->after;
975 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
976 nfs_fscache_invalidate(dir);
977 spin_unlock(&dir->i_lock);
978 }
979
980 struct nfs4_opendata {
981 struct kref kref;
982 struct nfs_openargs o_arg;
983 struct nfs_openres o_res;
984 struct nfs_open_confirmargs c_arg;
985 struct nfs_open_confirmres c_res;
986 struct nfs4_string owner_name;
987 struct nfs4_string group_name;
988 struct nfs4_label *a_label;
989 struct nfs_fattr f_attr;
990 struct nfs4_label *f_label;
991 struct dentry *dir;
992 struct dentry *dentry;
993 struct nfs4_state_owner *owner;
994 struct nfs4_state *state;
995 struct iattr attrs;
996 unsigned long timestamp;
997 unsigned int rpc_done : 1;
998 unsigned int file_created : 1;
999 unsigned int is_recover : 1;
1000 int rpc_status;
1001 int cancelled;
1002 };
1003
1004 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1005 int err, struct nfs4_exception *exception)
1006 {
1007 if (err != -EINVAL)
1008 return false;
1009 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1010 return false;
1011 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1012 exception->retry = 1;
1013 return true;
1014 }
1015
1016 static u32
1017 nfs4_map_atomic_open_share(struct nfs_server *server,
1018 fmode_t fmode, int openflags)
1019 {
1020 u32 res = 0;
1021
1022 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1023 case FMODE_READ:
1024 res = NFS4_SHARE_ACCESS_READ;
1025 break;
1026 case FMODE_WRITE:
1027 res = NFS4_SHARE_ACCESS_WRITE;
1028 break;
1029 case FMODE_READ|FMODE_WRITE:
1030 res = NFS4_SHARE_ACCESS_BOTH;
1031 }
1032 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1033 goto out;
1034 /* Want no delegation if we're using O_DIRECT */
1035 if (openflags & O_DIRECT)
1036 res |= NFS4_SHARE_WANT_NO_DELEG;
1037 out:
1038 return res;
1039 }
1040
1041 static enum open_claim_type4
1042 nfs4_map_atomic_open_claim(struct nfs_server *server,
1043 enum open_claim_type4 claim)
1044 {
1045 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1046 return claim;
1047 switch (claim) {
1048 default:
1049 return claim;
1050 case NFS4_OPEN_CLAIM_FH:
1051 return NFS4_OPEN_CLAIM_NULL;
1052 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1053 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1054 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1055 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1056 }
1057 }
1058
1059 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1060 {
1061 p->o_res.f_attr = &p->f_attr;
1062 p->o_res.f_label = p->f_label;
1063 p->o_res.seqid = p->o_arg.seqid;
1064 p->c_res.seqid = p->c_arg.seqid;
1065 p->o_res.server = p->o_arg.server;
1066 p->o_res.access_request = p->o_arg.access;
1067 nfs_fattr_init(&p->f_attr);
1068 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1069 }
1070
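/*
 * Allocate the nfs4_opendata that carries the arguments and results of
 * an OPEN compound: security labels, an open seqid, a reference to the
 * open owner, the mapped share access and claim type and, for creates
 * with attributes, a verifier derived from jiffies and the current pid.
 */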
1071 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1072 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1073 const struct iattr *attrs,
1074 struct nfs4_label *label,
1075 enum open_claim_type4 claim,
1076 gfp_t gfp_mask)
1077 {
1078 struct dentry *parent = dget_parent(dentry);
1079 struct inode *dir = d_inode(parent);
1080 struct nfs_server *server = NFS_SERVER(dir);
1081 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1082 struct nfs4_opendata *p;
1083
1084 p = kzalloc(sizeof(*p), gfp_mask);
1085 if (p == NULL)
1086 goto err;
1087
1088 p->f_label = nfs4_label_alloc(server, gfp_mask);
1089 if (IS_ERR(p->f_label))
1090 goto err_free_p;
1091
1092 p->a_label = nfs4_label_alloc(server, gfp_mask);
1093 if (IS_ERR(p->a_label))
1094 goto err_free_f;
1095
1096 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1097 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1098 if (IS_ERR(p->o_arg.seqid))
1099 goto err_free_label;
1100 nfs_sb_active(dentry->d_sb);
1101 p->dentry = dget(dentry);
1102 p->dir = parent;
1103 p->owner = sp;
1104 atomic_inc(&sp->so_count);
1105 p->o_arg.open_flags = flags;
1106 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1107 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1108 fmode, flags);
1109 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1110 * will return permission denied for all bits until close */
1111 if (!(flags & O_EXCL)) {
1112 /* ask server to check for all possible rights as results
1113 * are cached */
1114 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1115 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1116 }
1117 p->o_arg.clientid = server->nfs_client->cl_clientid;
1118 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1119 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1120 p->o_arg.name = &dentry->d_name;
1121 p->o_arg.server = server;
1122 p->o_arg.bitmask = nfs4_bitmask(server, label);
1123 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1124 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1125 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1126 switch (p->o_arg.claim) {
1127 case NFS4_OPEN_CLAIM_NULL:
1128 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1129 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1130 p->o_arg.fh = NFS_FH(dir);
1131 break;
1132 case NFS4_OPEN_CLAIM_PREVIOUS:
1133 case NFS4_OPEN_CLAIM_FH:
1134 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1135 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1136 p->o_arg.fh = NFS_FH(d_inode(dentry));
1137 }
1138 if (attrs != NULL && attrs->ia_valid != 0) {
1139 __u32 verf[2];
1140
1141 p->o_arg.u.attrs = &p->attrs;
1142 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1143
1144 verf[0] = jiffies;
1145 verf[1] = current->pid;
1146 memcpy(p->o_arg.u.verifier.data, verf,
1147 sizeof(p->o_arg.u.verifier.data));
1148 }
1149 p->c_arg.fh = &p->o_res.fh;
1150 p->c_arg.stateid = &p->o_res.stateid;
1151 p->c_arg.seqid = p->o_arg.seqid;
1152 nfs4_init_opendata_res(p);
1153 kref_init(&p->kref);
1154 return p;
1155
1156 err_free_label:
1157 nfs4_label_free(p->a_label);
1158 err_free_f:
1159 nfs4_label_free(p->f_label);
1160 err_free_p:
1161 kfree(p);
1162 err:
1163 dput(parent);
1164 return NULL;
1165 }
1166
1167 static void nfs4_opendata_free(struct kref *kref)
1168 {
1169 struct nfs4_opendata *p = container_of(kref,
1170 struct nfs4_opendata, kref);
1171 struct super_block *sb = p->dentry->d_sb;
1172
1173 nfs_free_seqid(p->o_arg.seqid);
1174 if (p->state != NULL)
1175 nfs4_put_open_state(p->state);
1176 nfs4_put_state_owner(p->owner);
1177
1178 nfs4_label_free(p->a_label);
1179 nfs4_label_free(p->f_label);
1180
1181 dput(p->dir);
1182 dput(p->dentry);
1183 nfs_sb_deactive(sb);
1184 nfs_fattr_free_names(&p->f_attr);
1185 kfree(p->f_attr.mdsthreshold);
1186 kfree(p);
1187 }
1188
1189 static void nfs4_opendata_put(struct nfs4_opendata *p)
1190 {
1191 if (p != NULL)
1192 kref_put(&p->kref, nfs4_opendata_free);
1193 }
1194
1195 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1196 {
1197 int ret;
1198
1199 ret = rpc_wait_for_completion_task(task);
1200 return ret;
1201 }
1202
1203 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1204 fmode_t fmode)
1205 {
1206 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1207 case FMODE_READ|FMODE_WRITE:
1208 return state->n_rdwr != 0;
1209 case FMODE_WRITE:
1210 return state->n_wronly != 0;
1211 case FMODE_READ:
1212 return state->n_rdonly != 0;
1213 }
1214 WARN_ON_ONCE(1);
1215 return false;
1216 }
1217
1218 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1219 {
1220 int ret = 0;
1221
1222 if (open_mode & (O_EXCL|O_TRUNC))
1223 goto out;
1224 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1225 case FMODE_READ:
1226 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1227 && state->n_rdonly != 0;
1228 break;
1229 case FMODE_WRITE:
1230 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1231 && state->n_wronly != 0;
1232 break;
1233 case FMODE_READ|FMODE_WRITE:
1234 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1235 && state->n_rdwr != 0;
1236 }
1237 out:
1238 return ret;
1239 }
1240
1241 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1242 enum open_claim_type4 claim)
1243 {
1244 if (delegation == NULL)
1245 return 0;
1246 if ((delegation->type & fmode) != fmode)
1247 return 0;
1248 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1249 return 0;
1250 switch (claim) {
1251 case NFS4_OPEN_CLAIM_NULL:
1252 case NFS4_OPEN_CLAIM_FH:
1253 break;
1254 case NFS4_OPEN_CLAIM_PREVIOUS:
1255 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1256 break;
1257 default:
1258 return 0;
1259 }
1260 nfs_mark_delegation_referenced(delegation);
1261 return 1;
1262 }
1263
1264 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1265 {
1266 switch (fmode) {
1267 case FMODE_WRITE:
1268 state->n_wronly++;
1269 break;
1270 case FMODE_READ:
1271 state->n_rdonly++;
1272 break;
1273 case FMODE_READ|FMODE_WRITE:
1274 state->n_rdwr++;
1275 }
1276 nfs4_state_set_mode_locked(state, state->state | fmode);
1277 }
1278
1279 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1280 {
1281 struct nfs_client *clp = state->owner->so_server->nfs_client;
1282 bool need_recover = false;
1283
1284 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1285 need_recover = true;
1286 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1287 need_recover = true;
1288 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1289 need_recover = true;
1290 if (need_recover)
1291 nfs4_state_mark_reclaim_nograce(clp, state);
1292 }
1293
1294 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1295 nfs4_stateid *stateid)
1296 {
1297 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1298 return true;
1299 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1300 nfs_test_and_clear_all_open_stateid(state);
1301 return true;
1302 }
1303 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1304 return true;
1305 return false;
1306 }
1307
1308 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1309 {
1310 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1311 return;
1312 if (state->n_wronly)
1313 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1314 if (state->n_rdonly)
1315 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1316 if (state->n_rdwr)
1317 set_bit(NFS_O_RDWR_STATE, &state->flags);
1318 set_bit(NFS_OPEN_STATE, &state->flags);
1319 }
1320
1321 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1322 nfs4_stateid *arg_stateid,
1323 nfs4_stateid *stateid, fmode_t fmode)
1324 {
1325 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1326 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1327 case FMODE_WRITE:
1328 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1329 break;
1330 case FMODE_READ:
1331 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1332 break;
1333 case 0:
1334 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1335 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1336 clear_bit(NFS_OPEN_STATE, &state->flags);
1337 }
1338 if (stateid == NULL)
1339 return;
1340 /* Handle races with OPEN */
1341 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1342 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1343 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1344 nfs_resync_open_stateid_locked(state);
1345 return;
1346 }
1347 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1348 nfs4_stateid_copy(&state->stateid, stateid);
1349 nfs4_stateid_copy(&state->open_stateid, stateid);
1350 }
1351
1352 static void nfs_clear_open_stateid(struct nfs4_state *state,
1353 nfs4_stateid *arg_stateid,
1354 nfs4_stateid *stateid, fmode_t fmode)
1355 {
1356 write_seqlock(&state->seqlock);
1357 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1358 write_sequnlock(&state->seqlock);
1359 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1360 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1361 }
1362
1363 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1364 {
1365 switch (fmode) {
1366 case FMODE_READ:
1367 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1368 break;
1369 case FMODE_WRITE:
1370 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1371 break;
1372 case FMODE_READ|FMODE_WRITE:
1373 set_bit(NFS_O_RDWR_STATE, &state->flags);
1374 }
1375 if (!nfs_need_update_open_stateid(state, stateid))
1376 return;
1377 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1378 nfs4_stateid_copy(&state->stateid, stateid);
1379 nfs4_stateid_copy(&state->open_stateid, stateid);
1380 }
1381
1382 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1383 {
1384 /*
1385 * Protect the call to nfs4_state_set_mode_locked and
1386 * serialise the stateid update
1387 */
1388 write_seqlock(&state->seqlock);
1389 if (deleg_stateid != NULL) {
1390 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1391 set_bit(NFS_DELEGATED_STATE, &state->flags);
1392 }
1393 if (open_stateid != NULL)
1394 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1395 write_sequnlock(&state->seqlock);
1396 spin_lock(&state->owner->so_lock);
1397 update_open_stateflags(state, fmode);
1398 spin_unlock(&state->owner->so_lock);
1399 }
1400
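/*
 * Record the stateid(s) returned by the server for this open.  If a
 * delegation covering the requested open mode is still held, its
 * stateid is recorded as well.  Returns 1 if the state was updated,
 * 0 otherwise.
 */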
1401 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1402 {
1403 struct nfs_inode *nfsi = NFS_I(state->inode);
1404 struct nfs_delegation *deleg_cur;
1405 int ret = 0;
1406
1407 fmode &= (FMODE_READ|FMODE_WRITE);
1408
1409 rcu_read_lock();
1410 deleg_cur = rcu_dereference(nfsi->delegation);
1411 if (deleg_cur == NULL)
1412 goto no_delegation;
1413
1414 spin_lock(&deleg_cur->lock);
1415 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1416 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1417 (deleg_cur->type & fmode) != fmode)
1418 goto no_delegation_unlock;
1419
1420 if (delegation == NULL)
1421 delegation = &deleg_cur->stateid;
1422 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1423 goto no_delegation_unlock;
1424
1425 nfs_mark_delegation_referenced(deleg_cur);
1426 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1427 ret = 1;
1428 no_delegation_unlock:
1429 spin_unlock(&deleg_cur->lock);
1430 no_delegation:
1431 rcu_read_unlock();
1432
1433 if (!ret && open_stateid != NULL) {
1434 __update_open_stateid(state, open_stateid, NULL, fmode);
1435 ret = 1;
1436 }
1437 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1438 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1439
1440 return ret;
1441 }
1442
1443 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1444 const nfs4_stateid *stateid)
1445 {
1446 struct nfs4_state *state = lsp->ls_state;
1447 bool ret = false;
1448
1449 spin_lock(&state->state_lock);
1450 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1451 goto out_noupdate;
1452 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1453 goto out_noupdate;
1454 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1455 ret = true;
1456 out_noupdate:
1457 spin_unlock(&state->state_lock);
1458 return ret;
1459 }
1460
1461 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1462 {
1463 struct nfs_delegation *delegation;
1464
1465 rcu_read_lock();
1466 delegation = rcu_dereference(NFS_I(inode)->delegation);
1467 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1468 rcu_read_unlock();
1469 return;
1470 }
1471 rcu_read_unlock();
1472 nfs4_inode_return_delegation(inode);
1473 }
1474
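/*
 * Try to satisfy an OPEN without contacting the server: either reuse an
 * already-open stateid of a compatible mode, or fall back on a delegation
 * that covers the requested access.  Returns a referenced nfs4_state on
 * success, or an ERR_PTR (typically -EAGAIN) when a full OPEN call is
 * required.
 */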
1475 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1476 {
1477 struct nfs4_state *state = opendata->state;
1478 struct nfs_inode *nfsi = NFS_I(state->inode);
1479 struct nfs_delegation *delegation;
1480 int open_mode = opendata->o_arg.open_flags;
1481 fmode_t fmode = opendata->o_arg.fmode;
1482 enum open_claim_type4 claim = opendata->o_arg.claim;
1483 nfs4_stateid stateid;
1484 int ret = -EAGAIN;
1485
1486 for (;;) {
1487 spin_lock(&state->owner->so_lock);
1488 if (can_open_cached(state, fmode, open_mode)) {
1489 update_open_stateflags(state, fmode);
1490 spin_unlock(&state->owner->so_lock);
1491 goto out_return_state;
1492 }
1493 spin_unlock(&state->owner->so_lock);
1494 rcu_read_lock();
1495 delegation = rcu_dereference(nfsi->delegation);
1496 if (!can_open_delegated(delegation, fmode, claim)) {
1497 rcu_read_unlock();
1498 break;
1499 }
1500 /* Save the delegation */
1501 nfs4_stateid_copy(&stateid, &delegation->stateid);
1502 rcu_read_unlock();
1503 nfs_release_seqid(opendata->o_arg.seqid);
1504 if (!opendata->is_recover) {
1505 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1506 if (ret != 0)
1507 goto out;
1508 }
1509 ret = -EAGAIN;
1510
1511 /* Try to update the stateid using the delegation */
1512 if (update_open_stateid(state, NULL, &stateid, fmode))
1513 goto out_return_state;
1514 }
1515 out:
1516 return ERR_PTR(ret);
1517 out_return_state:
1518 atomic_inc(&state->count);
1519 return state;
1520 }
1521
1522 static void
1523 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1524 {
1525 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1526 struct nfs_delegation *delegation;
1527 int delegation_flags = 0;
1528
1529 rcu_read_lock();
1530 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1531 if (delegation)
1532 delegation_flags = delegation->flags;
1533 rcu_read_unlock();
1534 switch (data->o_arg.claim) {
1535 default:
1536 break;
1537 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1538 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1539 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1540 "returning a delegation for "
1541 "OPEN(CLAIM_DELEGATE_CUR)\n",
1542 clp->cl_hostname);
1543 return;
1544 }
1545 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1546 nfs_inode_set_delegation(state->inode,
1547 data->owner->so_cred,
1548 &data->o_res);
1549 else
1550 nfs_inode_reclaim_delegation(state->inode,
1551 data->owner->so_cred,
1552 &data->o_res);
1553 }
1554
1555 /*
1556 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1557 * and update the nfs4_state.
1558 */
1559 static struct nfs4_state *
1560 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1561 {
1562 struct inode *inode = data->state->inode;
1563 struct nfs4_state *state = data->state;
1564 int ret;
1565
1566 if (!data->rpc_done) {
1567 if (data->rpc_status) {
1568 ret = data->rpc_status;
1569 goto err;
1570 }
1571 /* cached opens have already been processed */
1572 goto update;
1573 }
1574
1575 ret = nfs_refresh_inode(inode, &data->f_attr);
1576 if (ret)
1577 goto err;
1578
1579 if (data->o_res.delegation_type != 0)
1580 nfs4_opendata_check_deleg(data, state);
1581 update:
1582 update_open_stateid(state, &data->o_res.stateid, NULL,
1583 data->o_arg.fmode);
1584 atomic_inc(&state->count);
1585
1586 return state;
1587 err:
1588 return ERR_PTR(ret);
1589
1590 }
1591
1592 static struct nfs4_state *
1593 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1594 {
1595 struct inode *inode;
1596 struct nfs4_state *state = NULL;
1597 int ret;
1598
1599 if (!data->rpc_done) {
1600 state = nfs4_try_open_cached(data);
1601 goto out;
1602 }
1603
1604 ret = -EAGAIN;
1605 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1606 goto err;
1607 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1608 ret = PTR_ERR(inode);
1609 if (IS_ERR(inode))
1610 goto err;
1611 ret = -ENOMEM;
1612 state = nfs4_get_open_state(inode, data->owner);
1613 if (state == NULL)
1614 goto err_put_inode;
1615 if (data->o_res.delegation_type != 0)
1616 nfs4_opendata_check_deleg(data, state);
1617 update_open_stateid(state, &data->o_res.stateid, NULL,
1618 data->o_arg.fmode);
1619 iput(inode);
1620 out:
1621 nfs_release_seqid(data->o_arg.seqid);
1622 return state;
1623 err_put_inode:
1624 iput(inode);
1625 err:
1626 return ERR_PTR(ret);
1627 }
1628
1629 static struct nfs4_state *
1630 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1631 {
1632 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1633 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1634 return _nfs4_opendata_to_nfs4_state(data);
1635 }
1636
1637 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1638 {
1639 struct nfs_inode *nfsi = NFS_I(state->inode);
1640 struct nfs_open_context *ctx;
1641
1642 spin_lock(&state->inode->i_lock);
1643 list_for_each_entry(ctx, &nfsi->open_files, list) {
1644 if (ctx->state != state)
1645 continue;
1646 get_nfs_open_context(ctx);
1647 spin_unlock(&state->inode->i_lock);
1648 return ctx;
1649 }
1650 spin_unlock(&state->inode->i_lock);
1651 return ERR_PTR(-ENOENT);
1652 }
1653
1654 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1655 struct nfs4_state *state, enum open_claim_type4 claim)
1656 {
1657 struct nfs4_opendata *opendata;
1658
1659 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1660 NULL, NULL, claim, GFP_NOFS);
1661 if (opendata == NULL)
1662 return ERR_PTR(-ENOMEM);
1663 opendata->state = state;
1664 atomic_inc(&state->count);
1665 return opendata;
1666 }
1667
1668 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1669 fmode_t fmode)
1670 {
1671 struct nfs4_state *newstate;
1672 int ret;
1673
1674 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1675 return 0;
1676 opendata->o_arg.open_flags = 0;
1677 opendata->o_arg.fmode = fmode;
1678 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1679 NFS_SB(opendata->dentry->d_sb),
1680 fmode, 0);
1681 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1682 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1683 nfs4_init_opendata_res(opendata);
1684 ret = _nfs4_recover_proc_open(opendata);
1685 if (ret != 0)
1686 return ret;
1687 newstate = nfs4_opendata_to_nfs4_state(opendata);
1688 if (IS_ERR(newstate))
1689 return PTR_ERR(newstate);
1690 if (newstate != opendata->state)
1691 ret = -ESTALE;
1692 nfs4_close_state(newstate, fmode);
1693 return ret;
1694 }
1695
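/*
 * Recover every open mode currently held on 'state' by replaying the
 * OPEN calls (read/write, write-only and read-only in turn), then bring
 * state->stateid back in sync with the recovered open stateid.
 */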
1696 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1697 {
1698 int ret;
1699
1700 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1701 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1702 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1703 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1704 /* memory barrier prior to reading state->n_* */
1705 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1706 clear_bit(NFS_OPEN_STATE, &state->flags);
1707 smp_rmb();
1708 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1709 if (ret != 0)
1710 return ret;
1711 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1712 if (ret != 0)
1713 return ret;
1714 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1715 if (ret != 0)
1716 return ret;
1717 /*
1718 * We may have performed cached opens for all three recoveries.
1719 * Check if we need to update the current stateid.
1720 */
1721 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1722 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1723 write_seqlock(&state->seqlock);
1724 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1725 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1726 write_sequnlock(&state->seqlock);
1727 }
1728 return 0;
1729 }
1730
1731 /*
1732 * OPEN_RECLAIM:
1733 * reclaim state on the server after a reboot.
1734 */
1735 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1736 {
1737 struct nfs_delegation *delegation;
1738 struct nfs4_opendata *opendata;
1739 fmode_t delegation_type = 0;
1740 int status;
1741
1742 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1743 NFS4_OPEN_CLAIM_PREVIOUS);
1744 if (IS_ERR(opendata))
1745 return PTR_ERR(opendata);
1746 rcu_read_lock();
1747 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1748 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1749 delegation_type = delegation->type;
1750 rcu_read_unlock();
1751 opendata->o_arg.u.delegation_type = delegation_type;
1752 status = nfs4_open_recover(opendata, state);
1753 nfs4_opendata_put(opendata);
1754 return status;
1755 }
1756
1757 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1758 {
1759 struct nfs_server *server = NFS_SERVER(state->inode);
1760 struct nfs4_exception exception = { };
1761 int err;
1762 do {
1763 err = _nfs4_do_open_reclaim(ctx, state);
1764 trace_nfs4_open_reclaim(ctx, 0, err);
1765 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1766 continue;
1767 if (err != -NFS4ERR_DELAY)
1768 break;
1769 nfs4_handle_exception(server, err, &exception);
1770 } while (exception.retry);
1771 return err;
1772 }
1773
1774 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1775 {
1776 struct nfs_open_context *ctx;
1777 int ret;
1778
1779 ctx = nfs4_state_find_open_context(state);
1780 if (IS_ERR(ctx))
1781 return -EAGAIN;
1782 ret = nfs4_do_open_reclaim(ctx, state);
1783 put_nfs_open_context(ctx);
1784 return ret;
1785 }
1786
1787 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1788 {
1789 switch (err) {
1790 default:
1791 printk(KERN_ERR "NFS: %s: unhandled error "
1792 "%d.\n", __func__, err);
1793 case 0:
1794 case -ENOENT:
1795 case -EAGAIN:
1796 case -ESTALE:
1797 break;
1798 case -NFS4ERR_BADSESSION:
1799 case -NFS4ERR_BADSLOT:
1800 case -NFS4ERR_BAD_HIGH_SLOT:
1801 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1802 case -NFS4ERR_DEADSESSION:
1803 set_bit(NFS_DELEGATED_STATE, &state->flags);
1804 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1805 return -EAGAIN;
1806 case -NFS4ERR_STALE_CLIENTID:
1807 case -NFS4ERR_STALE_STATEID:
1808 set_bit(NFS_DELEGATED_STATE, &state->flags);
1809 case -NFS4ERR_EXPIRED:
1810 /* Don't recall a delegation if it was lost */
1811 nfs4_schedule_lease_recovery(server->nfs_client);
1812 return -EAGAIN;
1813 case -NFS4ERR_MOVED:
1814 nfs4_schedule_migration_recovery(server);
1815 return -EAGAIN;
1816 case -NFS4ERR_LEASE_MOVED:
1817 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1818 return -EAGAIN;
1819 case -NFS4ERR_DELEG_REVOKED:
1820 case -NFS4ERR_ADMIN_REVOKED:
1821 case -NFS4ERR_BAD_STATEID:
1822 case -NFS4ERR_OPENMODE:
1823 nfs_inode_find_state_and_recover(state->inode,
1824 stateid);
1825 nfs4_schedule_stateid_recovery(server, state);
1826 return -EAGAIN;
1827 case -NFS4ERR_DELAY:
1828 case -NFS4ERR_GRACE:
1829 set_bit(NFS_DELEGATED_STATE, &state->flags);
1830 ssleep(1);
1831 return -EAGAIN;
1832 case -ENOMEM:
1833 case -NFS4ERR_DENIED:
1834 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1835 return 0;
1836 }
1837 return err;
1838 }
1839
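/*
 * The delegation is being recalled: re-open the file on the server for
 * each open mode we hold so that regular open stateids replace the
 * delegation stateid before the delegation is returned.
 */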
1840 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1841 struct nfs4_state *state, const nfs4_stateid *stateid,
1842 fmode_t type)
1843 {
1844 struct nfs_server *server = NFS_SERVER(state->inode);
1845 struct nfs4_opendata *opendata;
1846 int err = 0;
1847
1848 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1849 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1850 if (IS_ERR(opendata))
1851 return PTR_ERR(opendata);
1852 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1853 write_seqlock(&state->seqlock);
1854 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1855 write_sequnlock(&state->seqlock);
1856 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1857 switch (type & (FMODE_READ|FMODE_WRITE)) {
1858 case FMODE_READ|FMODE_WRITE:
1859 case FMODE_WRITE:
1860 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1861 if (err)
1862 break;
1863 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1864 if (err)
1865 break;
1866 case FMODE_READ:
1867 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1868 }
1869 nfs4_opendata_put(opendata);
1870 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1871 }
1872
1873 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1874 {
1875 struct nfs4_opendata *data = calldata;
1876
1877 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1878 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1879 }
1880
1881 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1882 {
1883 struct nfs4_opendata *data = calldata;
1884
1885 nfs40_sequence_done(task, &data->c_res.seq_res);
1886
1887 data->rpc_status = task->tk_status;
1888 if (data->rpc_status == 0) {
1889 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1890 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1891 renew_lease(data->o_res.server, data->timestamp);
1892 data->rpc_done = 1;
1893 }
1894 }
1895
1896 static void nfs4_open_confirm_release(void *calldata)
1897 {
1898 struct nfs4_opendata *data = calldata;
1899 struct nfs4_state *state = NULL;
1900
1901 /* If this request hasn't been cancelled, do nothing */
1902 if (data->cancelled == 0)
1903 goto out_free;
1904 /* In case of error, no cleanup! */
1905 if (!data->rpc_done)
1906 goto out_free;
1907 state = nfs4_opendata_to_nfs4_state(data);
1908 if (!IS_ERR(state))
1909 nfs4_close_state(state, data->o_arg.fmode);
1910 out_free:
1911 nfs4_opendata_put(data);
1912 }
1913
1914 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1915 .rpc_call_prepare = nfs4_open_confirm_prepare,
1916 .rpc_call_done = nfs4_open_confirm_done,
1917 .rpc_release = nfs4_open_confirm_release,
1918 };
1919
1920 /*
1921 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1922 */
1923 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1924 {
1925 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1926 struct rpc_task *task;
1927 struct rpc_message msg = {
1928 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1929 .rpc_argp = &data->c_arg,
1930 .rpc_resp = &data->c_res,
1931 .rpc_cred = data->owner->so_cred,
1932 };
1933 struct rpc_task_setup task_setup_data = {
1934 .rpc_client = server->client,
1935 .rpc_message = &msg,
1936 .callback_ops = &nfs4_open_confirm_ops,
1937 .callback_data = data,
1938 .workqueue = nfsiod_workqueue,
1939 .flags = RPC_TASK_ASYNC,
1940 };
1941 int status;
1942
1943 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1944 kref_get(&data->kref);
1945 data->rpc_done = 0;
1946 data->rpc_status = 0;
1947 data->timestamp = jiffies;
1948 if (data->is_recover)
1949 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
1950 task = rpc_run_task(&task_setup_data);
1951 if (IS_ERR(task))
1952 return PTR_ERR(task);
1953 status = nfs4_wait_for_completion_rpc_task(task);
1954 if (status != 0) {
1955 data->cancelled = 1;
1956 smp_wmb();
1957 } else
1958 status = data->rpc_status;
1959 rpc_put_task(task);
1960 return status;
1961 }
1962
1963 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1964 {
1965 struct nfs4_opendata *data = calldata;
1966 struct nfs4_state_owner *sp = data->owner;
1967 struct nfs_client *clp = sp->so_server->nfs_client;
1968 enum open_claim_type4 claim = data->o_arg.claim;
1969
1970 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1971 goto out_wait;
1972 /*
1973 * Check if we still need to send an OPEN call, or if we can use
1974 * a delegation instead.
1975 */
1976 if (data->state != NULL) {
1977 struct nfs_delegation *delegation;
1978
1979 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1980 goto out_no_action;
1981 rcu_read_lock();
1982 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1983 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
1984 goto unlock_no_action;
1985 rcu_read_unlock();
1986 }
1987 /* Update client id. */
1988 data->o_arg.clientid = clp->cl_clientid;
1989 switch (claim) {
1990 default:
1991 break;
1992 case NFS4_OPEN_CLAIM_PREVIOUS:
1993 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1994 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1995 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
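		/* Fall through */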
1996 case NFS4_OPEN_CLAIM_FH:
1997 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1998 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1999 }
2000 data->timestamp = jiffies;
2001 if (nfs4_setup_sequence(data->o_arg.server,
2002 &data->o_arg.seq_args,
2003 &data->o_res.seq_res,
2004 task) != 0)
2005 nfs_release_seqid(data->o_arg.seqid);
2006
2007 /* Set the create mode (note dependency on the session type) */
2008 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2009 if (data->o_arg.open_flags & O_EXCL) {
2010 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2011 if (nfs4_has_persistent_session(clp))
2012 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2013 else if (clp->cl_mvops->minor_version > 0)
2014 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2015 }
2016 return;
2017 unlock_no_action:
2018 rcu_read_unlock();
2019 out_no_action:
2020 task->tk_action = NULL;
2021 out_wait:
2022 nfs4_sequence_done(task, &data->o_res.seq_res);
2023 }
2024
2025 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2026 {
2027 struct nfs4_opendata *data = calldata;
2028
2029 data->rpc_status = task->tk_status;
2030
2031 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
2032 return;
2033
2034 if (task->tk_status == 0) {
2035 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2036 switch (data->o_res.f_attr->mode & S_IFMT) {
2037 case S_IFREG:
2038 break;
2039 case S_IFLNK:
2040 data->rpc_status = -ELOOP;
2041 break;
2042 case S_IFDIR:
2043 data->rpc_status = -EISDIR;
2044 break;
2045 default:
2046 data->rpc_status = -ENOTDIR;
2047 }
2048 }
2049 renew_lease(data->o_res.server, data->timestamp);
2050 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2051 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2052 }
2053 data->rpc_done = 1;
2054 }
2055
2056 static void nfs4_open_release(void *calldata)
2057 {
2058 struct nfs4_opendata *data = calldata;
2059 struct nfs4_state *state = NULL;
2060
2061 /* If this request hasn't been cancelled, do nothing */
2062 if (data->cancelled == 0)
2063 goto out_free;
2064 /* In case of error, no cleanup! */
2065 if (data->rpc_status != 0 || !data->rpc_done)
2066 goto out_free;
2067 /* In case we need an open_confirm, no cleanup! */
2068 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2069 goto out_free;
2070 state = nfs4_opendata_to_nfs4_state(data);
2071 if (!IS_ERR(state))
2072 nfs4_close_state(state, data->o_arg.fmode);
2073 out_free:
2074 nfs4_opendata_put(data);
2075 }
2076
2077 static const struct rpc_call_ops nfs4_open_ops = {
2078 .rpc_call_prepare = nfs4_open_prepare,
2079 .rpc_call_done = nfs4_open_done,
2080 .rpc_release = nfs4_open_release,
2081 };
2082
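/*
 * Issue the OPEN RPC asynchronously and wait for it to complete. Takes an
 * extra reference on the opendata; if the wait is interrupted, the request
 * is marked cancelled so that nfs4_open_release() can clean up after it.
 */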
2083 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2084 {
2085 struct inode *dir = d_inode(data->dir);
2086 struct nfs_server *server = NFS_SERVER(dir);
2087 struct nfs_openargs *o_arg = &data->o_arg;
2088 struct nfs_openres *o_res = &data->o_res;
2089 struct rpc_task *task;
2090 struct rpc_message msg = {
2091 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2092 .rpc_argp = o_arg,
2093 .rpc_resp = o_res,
2094 .rpc_cred = data->owner->so_cred,
2095 };
2096 struct rpc_task_setup task_setup_data = {
2097 .rpc_client = server->client,
2098 .rpc_message = &msg,
2099 .callback_ops = &nfs4_open_ops,
2100 .callback_data = data,
2101 .workqueue = nfsiod_workqueue,
2102 .flags = RPC_TASK_ASYNC,
2103 };
2104 int status;
2105
2106 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2107 kref_get(&data->kref);
2108 data->rpc_done = 0;
2109 data->rpc_status = 0;
2110 data->cancelled = 0;
2111 data->is_recover = 0;
2112 if (isrecover) {
2113 nfs4_set_sequence_privileged(&o_arg->seq_args);
2114 data->is_recover = 1;
2115 }
2116 task = rpc_run_task(&task_setup_data);
2117 if (IS_ERR(task))
2118 return PTR_ERR(task);
2119 status = nfs4_wait_for_completion_rpc_task(task);
2120 if (status != 0) {
2121 data->cancelled = 1;
2122 smp_wmb();
2123 } else
2124 status = data->rpc_status;
2125 rpc_put_task(task);
2126
2127 return status;
2128 }
2129
2130 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2131 {
2132 struct inode *dir = d_inode(data->dir);
2133 struct nfs_openres *o_res = &data->o_res;
2134 int status;
2135
2136 status = nfs4_run_open_task(data, 1);
2137 if (status != 0 || !data->rpc_done)
2138 return status;
2139
2140 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2141
2142 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2143 status = _nfs4_proc_open_confirm(data);
2144 if (status != 0)
2145 return status;
2146 }
2147
2148 return status;
2149 }
2150
2151 /*
2152 * Additional permission checks in order to distinguish between an
2153 * open for read, and an open for execute. This works around the
2154 * fact that NFSv4 OPEN treats read and execute permissions as being
2155 * the same.
2156 * Note that in the non-execute case, we want to turn off permission
2157 * checking if we just created a new file (POSIX open() semantics).
2158 */
2159 static int nfs4_opendata_access(struct rpc_cred *cred,
2160 struct nfs4_opendata *opendata,
2161 struct nfs4_state *state, fmode_t fmode,
2162 int openflags)
2163 {
2164 struct nfs_access_entry cache;
2165 u32 mask;
2166
2167 /* access call failed or for some reason the server doesn't
2168 * support any access modes -- defer access call until later */
2169 if (opendata->o_res.access_supported == 0)
2170 return 0;
2171
2172 mask = 0;
2173 /*
2174 * Use openflags to check for exec, because fmode won't
2175 * always have FMODE_EXEC set when the file is opened for execute access.
2176 */
2177 if (openflags & __FMODE_EXEC) {
2178 /* ONLY check for exec rights */
2179 mask = MAY_EXEC;
2180 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2181 mask = MAY_READ;
2182
2183 cache.cred = cred;
2184 cache.jiffies = jiffies;
2185 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2186 nfs_access_add_cache(state->inode, &cache);
2187
2188 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2189 return 0;
2190
2191 /* even though OPEN succeeded, access is denied. Close the file */
2192 nfs4_close_state(state, fmode);
2193 return -EACCES;
2194 }
2195
2196 /*
2197 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2198 */
2199 static int _nfs4_proc_open(struct nfs4_opendata *data)
2200 {
2201 struct inode *dir = d_inode(data->dir);
2202 struct nfs_server *server = NFS_SERVER(dir);
2203 struct nfs_openargs *o_arg = &data->o_arg;
2204 struct nfs_openres *o_res = &data->o_res;
2205 int status;
2206
2207 status = nfs4_run_open_task(data, 0);
2208 if (!data->rpc_done)
2209 return status;
2210 if (status != 0) {
2211 if (status == -NFS4ERR_BADNAME &&
2212 !(o_arg->open_flags & O_CREAT))
2213 return -ENOENT;
2214 return status;
2215 }
2216
2217 nfs_fattr_map_and_free_names(server, &data->f_attr);
2218
2219 if (o_arg->open_flags & O_CREAT) {
2220 update_changeattr(dir, &o_res->cinfo);
2221 if (o_arg->open_flags & O_EXCL)
2222 data->file_created = 1;
2223 else if (o_res->cinfo.before != o_res->cinfo.after)
2224 data->file_created = 1;
2225 }
2226 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2227 server->caps &= ~NFS_CAP_POSIX_LOCK;
2228 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2229 status = _nfs4_proc_open_confirm(data);
2230 if (status != 0)
2231 return status;
2232 }
2233 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2234 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2235 return 0;
2236 }
2237
2238 static int nfs4_recover_expired_lease(struct nfs_server *server)
2239 {
2240 return nfs4_client_recover_expired_lease(server->nfs_client);
2241 }
2242
2243 /*
2244 * OPEN_EXPIRED:
2245 * reclaim state on the server after a network partition.
2246 * Assumes caller holds the appropriate lock
2247 */
2248 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2249 {
2250 struct nfs4_opendata *opendata;
2251 int ret;
2252
2253 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2254 NFS4_OPEN_CLAIM_FH);
2255 if (IS_ERR(opendata))
2256 return PTR_ERR(opendata);
2257 ret = nfs4_open_recover(opendata, state);
2258 if (ret == -ESTALE)
2259 d_drop(ctx->dentry);
2260 nfs4_opendata_put(opendata);
2261 return ret;
2262 }
2263
2264 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2265 {
2266 struct nfs_server *server = NFS_SERVER(state->inode);
2267 struct nfs4_exception exception = { };
2268 int err;
2269
2270 do {
2271 err = _nfs4_open_expired(ctx, state);
2272 trace_nfs4_open_expired(ctx, 0, err);
2273 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2274 continue;
2275 switch (err) {
2276 default:
2277 goto out;
2278 case -NFS4ERR_GRACE:
2279 case -NFS4ERR_DELAY:
2280 nfs4_handle_exception(server, err, &exception);
2281 err = 0;
2282 }
2283 } while (exception.retry);
2284 out:
2285 return err;
2286 }
2287
2288 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2289 {
2290 struct nfs_open_context *ctx;
2291 int ret;
2292
2293 ctx = nfs4_state_find_open_context(state);
2294 if (IS_ERR(ctx))
2295 return -EAGAIN;
2296 ret = nfs4_do_open_expired(ctx, state);
2297 put_nfs_open_context(ctx);
2298 return ret;
2299 }
2300
2301 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2302 {
2303 nfs_remove_bad_delegation(state->inode);
2304 write_seqlock(&state->seqlock);
2305 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2306 write_sequnlock(&state->seqlock);
2307 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2308 }
2309
2310 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2311 {
2312 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2313 nfs_finish_clear_delegation_stateid(state);
2314 }
2315
2316 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2317 {
2318 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2319 nfs40_clear_delegation_stateid(state);
2320 return nfs4_open_expired(sp, state);
2321 }
2322
2323 #if defined(CONFIG_NFS_V4_1)
2324 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2325 {
2326 struct nfs_server *server = NFS_SERVER(state->inode);
2327 nfs4_stateid stateid;
2328 struct nfs_delegation *delegation;
2329 struct rpc_cred *cred;
2330 int status;
2331
2332 /* Get the delegation credential for use by test/free_stateid */
2333 rcu_read_lock();
2334 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2335 if (delegation == NULL) {
2336 rcu_read_unlock();
2337 return;
2338 }
2339
2340 nfs4_stateid_copy(&stateid, &delegation->stateid);
2341 cred = get_rpccred(delegation->cred);
2342 rcu_read_unlock();
2343 status = nfs41_test_stateid(server, &stateid, cred);
2344 trace_nfs4_test_delegation_stateid(state, NULL, status);
2345
2346 if (status != NFS_OK) {
2347 /* Free the stateid unless the server explicitly
2348 * informs us the stateid is unrecognized. */
2349 if (status != -NFS4ERR_BAD_STATEID)
2350 nfs41_free_stateid(server, &stateid, cred);
2351 nfs_finish_clear_delegation_stateid(state);
2352 }
2353
2354 put_rpccred(cred);
2355 }
2356
2357 /**
2358 * nfs41_check_open_stateid - possibly free an open stateid
2359 *
2360 * @state: NFSv4 state for an inode
2361 *
2362 * Returns NFS_OK if recovery for this stateid is now finished.
2363 * Otherwise a negative NFS4ERR value is returned.
2364 */
2365 static int nfs41_check_open_stateid(struct nfs4_state *state)
2366 {
2367 struct nfs_server *server = NFS_SERVER(state->inode);
2368 nfs4_stateid *stateid = &state->open_stateid;
2369 struct rpc_cred *cred = state->owner->so_cred;
2370 int status;
2371
2372 /* If a state reset has been done, test_stateid is unneeded */
2373 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2374 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2375 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2376 return -NFS4ERR_BAD_STATEID;
2377
2378 status = nfs41_test_stateid(server, stateid, cred);
2379 trace_nfs4_test_open_stateid(state, NULL, status);
2380 if (status != NFS_OK) {
2381 /* Free the stateid unless the server explicitly
2382 * informs us the stateid is unrecognized. */
2383 if (status != -NFS4ERR_BAD_STATEID)
2384 nfs41_free_stateid(server, stateid, cred);
2385
2386 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2387 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2388 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2389 clear_bit(NFS_OPEN_STATE, &state->flags);
2390 }
2391 return status;
2392 }
2393
2394 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2395 {
2396 int status;
2397
2398 nfs41_check_delegation_stateid(state);
2399 status = nfs41_check_open_stateid(state);
2400 if (status != NFS_OK)
2401 status = nfs4_open_expired(sp, state);
2402 return status;
2403 }
2404 #endif
2405
2406 /*
2407 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2408 * fields corresponding to attributes that were used to store the verifier.
2409 * Make sure we clobber those fields in the later setattr call
2410 */
2411 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2412 struct iattr *sattr, struct nfs4_label **label)
2413 {
2414 const u32 *attrset = opendata->o_res.attrset;
2415
2416 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2417 !(sattr->ia_valid & ATTR_ATIME_SET))
2418 sattr->ia_valid |= ATTR_ATIME;
2419
2420 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2421 !(sattr->ia_valid & ATTR_MTIME_SET))
2422 sattr->ia_valid |= ATTR_MTIME;
2423
2424 /* Except for MODE, it seems harmless to set these twice. */
2425 if ((attrset[1] & FATTR4_WORD1_MODE))
2426 sattr->ia_valid &= ~ATTR_MODE;
2427
2428 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2429 *label = NULL;
2430 }
2431
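/*
 * Send the OPEN, turn the reply into a referenced nfs4_state, instantiate
 * the dentry if it was negative, run the deferred access check, and attach
 * the open context. A reclaim that raced with us triggers stateid recovery.
 */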
2432 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2433 fmode_t fmode,
2434 int flags,
2435 struct nfs_open_context *ctx)
2436 {
2437 struct nfs4_state_owner *sp = opendata->owner;
2438 struct nfs_server *server = sp->so_server;
2439 struct dentry *dentry;
2440 struct nfs4_state *state;
2441 unsigned int seq;
2442 int ret;
2443
2444 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2445
2446 ret = _nfs4_proc_open(opendata);
2447 if (ret != 0)
2448 goto out;
2449
2450 state = nfs4_opendata_to_nfs4_state(opendata);
2451 ret = PTR_ERR(state);
2452 if (IS_ERR(state))
2453 goto out;
2454 if (server->caps & NFS_CAP_POSIX_LOCK)
2455 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2456
2457 dentry = opendata->dentry;
2458 if (d_really_is_negative(dentry)) {
2459 /* FIXME: Is this d_drop() ever needed? */
2460 d_drop(dentry);
2461 dentry = d_add_unique(dentry, igrab(state->inode));
2462 if (dentry == NULL) {
2463 dentry = opendata->dentry;
2464 } else if (dentry != ctx->dentry) {
2465 dput(ctx->dentry);
2466 ctx->dentry = dget(dentry);
2467 }
2468 nfs_set_verifier(dentry,
2469 nfs_save_change_attribute(d_inode(opendata->dir)));
2470 }
2471
2472 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2473 if (ret != 0)
2474 goto out;
2475
2476 ctx->state = state;
2477 if (d_inode(dentry) == state->inode) {
2478 nfs_inode_attach_open_context(ctx);
2479 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2480 nfs4_schedule_stateid_recovery(server, state);
2481 }
2482 out:
2483 return ret;
2484 }
2485
2486 /*
2487 * Returns a referenced nfs4_state
2488 */
2489 static int _nfs4_do_open(struct inode *dir,
2490 struct nfs_open_context *ctx,
2491 int flags,
2492 struct iattr *sattr,
2493 struct nfs4_label *label,
2494 int *opened)
2495 {
2496 struct nfs4_state_owner *sp;
2497 struct nfs4_state *state = NULL;
2498 struct nfs_server *server = NFS_SERVER(dir);
2499 struct nfs4_opendata *opendata;
2500 struct dentry *dentry = ctx->dentry;
2501 struct rpc_cred *cred = ctx->cred;
2502 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2503 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2504 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2505 struct nfs4_label *olabel = NULL;
2506 int status;
2507
2508 /* Protect against reboot recovery conflicts */
2509 status = -ENOMEM;
2510 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2511 if (sp == NULL) {
2512 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2513 goto out_err;
2514 }
2515 status = nfs4_recover_expired_lease(server);
2516 if (status != 0)
2517 goto err_put_state_owner;
2518 if (d_really_is_positive(dentry))
2519 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2520 status = -ENOMEM;
2521 if (d_really_is_positive(dentry))
2522 claim = NFS4_OPEN_CLAIM_FH;
2523 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2524 label, claim, GFP_KERNEL);
2525 if (opendata == NULL)
2526 goto err_put_state_owner;
2527
2528 if (label) {
2529 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2530 if (IS_ERR(olabel)) {
2531 status = PTR_ERR(olabel);
2532 goto err_opendata_put;
2533 }
2534 }
2535
2536 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2537 if (!opendata->f_attr.mdsthreshold) {
2538 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2539 if (!opendata->f_attr.mdsthreshold)
2540 goto err_free_label;
2541 }
2542 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2543 }
2544 if (d_really_is_positive(dentry))
2545 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2546
2547 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2548 if (status != 0)
2549 goto err_free_label;
2550 state = ctx->state;
2551
2552 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2553 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2554 nfs4_exclusive_attrset(opendata, sattr, &label);
2555
2556 nfs_fattr_init(opendata->o_res.f_attr);
2557 status = nfs4_do_setattr(state->inode, cred,
2558 opendata->o_res.f_attr, sattr,
2559 state, label, olabel);
2560 if (status == 0) {
2561 nfs_setattr_update_inode(state->inode, sattr,
2562 opendata->o_res.f_attr);
2563 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2564 }
2565 }
2566 if (opened && opendata->file_created)
2567 *opened |= FILE_CREATED;
2568
2569 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2570 *ctx_th = opendata->f_attr.mdsthreshold;
2571 opendata->f_attr.mdsthreshold = NULL;
2572 }
2573
2574 nfs4_label_free(olabel);
2575
2576 nfs4_opendata_put(opendata);
2577 nfs4_put_state_owner(sp);
2578 return 0;
2579 err_free_label:
2580 nfs4_label_free(olabel);
2581 err_opendata_put:
2582 nfs4_opendata_put(opendata);
2583 err_put_state_owner:
2584 nfs4_put_state_owner(sp);
2585 out_err:
2586 return status;
2587 }
2588
2589
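/*
 * Retry wrapper around _nfs4_do_open(): BAD_SEQID, BAD_STATEID and
 * delegation races (-EAGAIN) cause the open to be retried; all other
 * errors go through the standard NFSv4 exception handling.
 */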
2590 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2591 struct nfs_open_context *ctx,
2592 int flags,
2593 struct iattr *sattr,
2594 struct nfs4_label *label,
2595 int *opened)
2596 {
2597 struct nfs_server *server = NFS_SERVER(dir);
2598 struct nfs4_exception exception = { };
2599 struct nfs4_state *res;
2600 int status;
2601
2602 do {
2603 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2604 res = ctx->state;
2605 trace_nfs4_open_file(ctx, flags, status);
2606 if (status == 0)
2607 break;
2608 /* NOTE: BAD_SEQID means the server and client disagree about the
2609 * book-keeping w.r.t. state-changing operations
2610 * (OPEN/CLOSE/LOCK/LOCKU...)
2611 * It is actually a sign of a bug on the client or on the server.
2612 *
2613 * If we receive a BAD_SEQID error in the particular case of
2614 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2615 * have unhashed the old state_owner for us, and that we can
2616 * therefore safely retry using a new one. We should still warn
2617 * the user though...
2618 */
2619 if (status == -NFS4ERR_BAD_SEQID) {
2620 pr_warn_ratelimited("NFS: v4 server %s "
2621 " returned a bad sequence-id error!\n",
2622 NFS_SERVER(dir)->nfs_client->cl_hostname);
2623 exception.retry = 1;
2624 continue;
2625 }
2626 /*
2627 * BAD_STATEID on OPEN means that the server cancelled our
2628 * state before it received the OPEN_CONFIRM.
2629 * Recover by retrying the request as per the discussion
2630 * on Page 181 of RFC3530.
2631 */
2632 if (status == -NFS4ERR_BAD_STATEID) {
2633 exception.retry = 1;
2634 continue;
2635 }
2636 if (status == -EAGAIN) {
2637 /* We must have found a delegation */
2638 exception.retry = 1;
2639 continue;
2640 }
2641 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2642 continue;
2643 res = ERR_PTR(nfs4_handle_exception(server,
2644 status, &exception));
2645 } while (exception.retry);
2646 return res;
2647 }
2648
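/*
 * Choose the stateid sent with SETATTR: a delegation stateid if one is
 * held, an open/lock stateid when the size is being changed, and the
 * zero stateid otherwise.
 */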
2649 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2650 struct nfs_fattr *fattr, struct iattr *sattr,
2651 struct nfs4_state *state, struct nfs4_label *ilabel,
2652 struct nfs4_label *olabel)
2653 {
2654 struct nfs_server *server = NFS_SERVER(inode);
2655 struct nfs_setattrargs arg = {
2656 .fh = NFS_FH(inode),
2657 .iap = sattr,
2658 .server = server,
2659 .bitmask = server->attr_bitmask,
2660 .label = ilabel,
2661 };
2662 struct nfs_setattrres res = {
2663 .fattr = fattr,
2664 .label = olabel,
2665 .server = server,
2666 };
2667 struct rpc_message msg = {
2668 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2669 .rpc_argp = &arg,
2670 .rpc_resp = &res,
2671 .rpc_cred = cred,
2672 };
2673 unsigned long timestamp = jiffies;
2674 fmode_t fmode;
2675 bool truncate;
2676 int status;
2677
2678 arg.bitmask = nfs4_bitmask(server, ilabel);
2679 if (ilabel)
2680 arg.bitmask = nfs4_bitmask(server, olabel);
2681
2682 nfs_fattr_init(fattr);
2683
2684 /* Servers should only apply open mode checks for file size changes */
2685 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2686 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2687
2688 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2689 /* Use that stateid */
2690 } else if (truncate && state != NULL) {
2691 struct nfs_lockowner lockowner = {
2692 .l_owner = current->files,
2693 .l_pid = current->tgid,
2694 };
2695 if (!nfs4_valid_open_stateid(state))
2696 return -EBADF;
2697 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2698 &lockowner) == -EIO)
2699 return -EBADF;
2700 } else
2701 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2702
2703 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2704 if (status == 0 && state != NULL)
2705 renew_lease(server, timestamp);
2706 return status;
2707 }
2708
2709 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2710 struct nfs_fattr *fattr, struct iattr *sattr,
2711 struct nfs4_state *state, struct nfs4_label *ilabel,
2712 struct nfs4_label *olabel)
2713 {
2714 struct nfs_server *server = NFS_SERVER(inode);
2715 struct nfs4_exception exception = {
2716 .state = state,
2717 .inode = inode,
2718 };
2719 int err;
2720 do {
2721 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2722 trace_nfs4_setattr(inode, err);
2723 switch (err) {
2724 case -NFS4ERR_OPENMODE:
2725 if (!(sattr->ia_valid & ATTR_SIZE)) {
2726 pr_warn_once("NFSv4: server %s is incorrectly "
2727 "applying open mode checks to "
2728 "a SETATTR that is not "
2729 "changing file size.\n",
2730 server->nfs_client->cl_hostname);
2731 }
2732 if (state && !(state->state & FMODE_WRITE)) {
2733 err = -EBADF;
2734 if (sattr->ia_valid & ATTR_OPEN)
2735 err = -EACCES;
2736 goto out;
2737 }
2738 }
2739 err = nfs4_handle_exception(server, err, &exception);
2740 } while (exception.retry);
2741 out:
2742 return err;
2743 }
2744
2745 static bool
2746 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2747 {
2748 if (inode == NULL || !nfs_have_layout(inode))
2749 return false;
2750
2751 return pnfs_wait_on_layoutreturn(inode, task);
2752 }
2753
2754 struct nfs4_closedata {
2755 struct inode *inode;
2756 struct nfs4_state *state;
2757 struct nfs_closeargs arg;
2758 struct nfs_closeres res;
2759 struct nfs_fattr fattr;
2760 unsigned long timestamp;
2761 bool roc;
2762 u32 roc_barrier;
2763 };
2764
2765 static void nfs4_free_closedata(void *data)
2766 {
2767 struct nfs4_closedata *calldata = data;
2768 struct nfs4_state_owner *sp = calldata->state->owner;
2769 struct super_block *sb = calldata->state->inode->i_sb;
2770
2771 if (calldata->roc)
2772 pnfs_roc_release(calldata->state->inode);
2773 nfs4_put_open_state(calldata->state);
2774 nfs_free_seqid(calldata->arg.seqid);
2775 nfs4_put_state_owner(sp);
2776 nfs_sb_deactive(sb);
2777 kfree(calldata);
2778 }
2779
2780 static void nfs4_close_done(struct rpc_task *task, void *data)
2781 {
2782 struct nfs4_closedata *calldata = data;
2783 struct nfs4_state *state = calldata->state;
2784 struct nfs_server *server = NFS_SERVER(calldata->inode);
2785 nfs4_stateid *res_stateid = NULL;
2786
2787 dprintk("%s: begin!\n", __func__);
2788 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2789 return;
2790 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2791 /* Hmm. We are done with the inode, and in the process of freeing
2792 * the state_owner. We keep this around to process errors
2793 */
2794 switch (task->tk_status) {
2795 case 0:
2796 res_stateid = &calldata->res.stateid;
2797 if (calldata->roc)
2798 pnfs_roc_set_barrier(state->inode,
2799 calldata->roc_barrier);
2800 renew_lease(server, calldata->timestamp);
2801 break;
2802 case -NFS4ERR_ADMIN_REVOKED:
2803 case -NFS4ERR_STALE_STATEID:
2804 case -NFS4ERR_OLD_STATEID:
2805 case -NFS4ERR_BAD_STATEID:
2806 case -NFS4ERR_EXPIRED:
2807 if (!nfs4_stateid_match(&calldata->arg.stateid,
2808 &state->open_stateid)) {
2809 rpc_restart_call_prepare(task);
2810 goto out_release;
2811 }
2812 if (calldata->arg.fmode == 0)
2813 break;
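		/* Fall through */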
2814 default:
2815 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2816 rpc_restart_call_prepare(task);
2817 goto out_release;
2818 }
2819 }
2820 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2821 res_stateid, calldata->arg.fmode);
2822 out_release:
2823 nfs_release_seqid(calldata->arg.seqid);
2824 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2825 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2826 }
2827
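/*
 * Work out whether to send an OPEN_DOWNGRADE (some open modes are still in
 * use) or a full CLOSE (no modes remain), based on the per-mode counters in
 * the nfs4_state, and exit without an RPC if neither is needed.
 */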
2828 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2829 {
2830 struct nfs4_closedata *calldata = data;
2831 struct nfs4_state *state = calldata->state;
2832 struct inode *inode = calldata->inode;
2833 bool is_rdonly, is_wronly, is_rdwr;
2834 int call_close = 0;
2835
2836 dprintk("%s: begin!\n", __func__);
2837 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2838 goto out_wait;
2839
2840 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2841 spin_lock(&state->owner->so_lock);
2842 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2843 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2844 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2845 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2846 /* Calculate the change in open mode */
2847 calldata->arg.fmode = 0;
2848 if (state->n_rdwr == 0) {
2849 if (state->n_rdonly == 0)
2850 call_close |= is_rdonly;
2851 else if (is_rdonly)
2852 calldata->arg.fmode |= FMODE_READ;
2853 if (state->n_wronly == 0)
2854 call_close |= is_wronly;
2855 else if (is_wronly)
2856 calldata->arg.fmode |= FMODE_WRITE;
2857 } else if (is_rdwr)
2858 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2859
2860 if (calldata->arg.fmode == 0)
2861 call_close |= is_rdwr;
2862
2863 if (!nfs4_valid_open_stateid(state))
2864 call_close = 0;
2865 spin_unlock(&state->owner->so_lock);
2866
2867 if (!call_close) {
2868 /* Note: exit _without_ calling nfs4_close_done */
2869 goto out_no_action;
2870 }
2871
2872 if (nfs4_wait_on_layoutreturn(inode, task)) {
2873 nfs_release_seqid(calldata->arg.seqid);
2874 goto out_wait;
2875 }
2876
2877 if (calldata->arg.fmode == 0)
2878 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2879 if (calldata->roc)
2880 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2881
2882 calldata->arg.share_access =
2883 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2884 calldata->arg.fmode, 0);
2885
2886 nfs_fattr_init(calldata->res.fattr);
2887 calldata->timestamp = jiffies;
2888 if (nfs4_setup_sequence(NFS_SERVER(inode),
2889 &calldata->arg.seq_args,
2890 &calldata->res.seq_res,
2891 task) != 0)
2892 nfs_release_seqid(calldata->arg.seqid);
2893 dprintk("%s: done!\n", __func__);
2894 return;
2895 out_no_action:
2896 task->tk_action = NULL;
2897 out_wait:
2898 nfs4_sequence_done(task, &calldata->res.seq_res);
2899 }
2900
2901 static const struct rpc_call_ops nfs4_close_ops = {
2902 .rpc_call_prepare = nfs4_close_prepare,
2903 .rpc_call_done = nfs4_close_done,
2904 .rpc_release = nfs4_free_closedata,
2905 };
2906
2907 static bool nfs4_roc(struct inode *inode)
2908 {
2909 if (!nfs_have_layout(inode))
2910 return false;
2911 return pnfs_roc(inode);
2912 }
2913
2914 /*
2915 * It is possible for data to be read/written from a mem-mapped file
2916 * after the sys_close call (which hits the vfs layer as a flush).
2917 * This means that we can't safely call nfsv4 close on a file until
2918 * the inode is cleared. This in turn means that we are not good
2919 * NFSv4 citizens - we do not indicate to the server to update the file's
2920 * share state even when we are done with one of the three share
2921 * stateid's in the inode.
2922 *
2923 * NOTE: Caller must be holding the sp->so_owner semaphore!
2924 */
2925 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2926 {
2927 struct nfs_server *server = NFS_SERVER(state->inode);
2928 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2929 struct nfs4_closedata *calldata;
2930 struct nfs4_state_owner *sp = state->owner;
2931 struct rpc_task *task;
2932 struct rpc_message msg = {
2933 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2934 .rpc_cred = state->owner->so_cred,
2935 };
2936 struct rpc_task_setup task_setup_data = {
2937 .rpc_client = server->client,
2938 .rpc_message = &msg,
2939 .callback_ops = &nfs4_close_ops,
2940 .workqueue = nfsiod_workqueue,
2941 .flags = RPC_TASK_ASYNC,
2942 };
2943 int status = -ENOMEM;
2944
2945 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2946 &task_setup_data.rpc_client, &msg);
2947
2948 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2949 if (calldata == NULL)
2950 goto out;
2951 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2952 calldata->inode = state->inode;
2953 calldata->state = state;
2954 calldata->arg.fh = NFS_FH(state->inode);
2955 /* Serialization for the sequence id */
2956 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2957 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2958 if (IS_ERR(calldata->arg.seqid))
2959 goto out_free_calldata;
2960 calldata->arg.fmode = 0;
2961 calldata->arg.bitmask = server->cache_consistency_bitmask;
2962 calldata->res.fattr = &calldata->fattr;
2963 calldata->res.seqid = calldata->arg.seqid;
2964 calldata->res.server = server;
2965 calldata->roc = nfs4_roc(state->inode);
2966 nfs_sb_active(calldata->inode->i_sb);
2967
2968 msg.rpc_argp = &calldata->arg;
2969 msg.rpc_resp = &calldata->res;
2970 task_setup_data.callback_data = calldata;
2971 task = rpc_run_task(&task_setup_data);
2972 if (IS_ERR(task))
2973 return PTR_ERR(task);
2974 status = 0;
2975 if (wait)
2976 status = rpc_wait_for_completion_task(task);
2977 rpc_put_task(task);
2978 return status;
2979 out_free_calldata:
2980 kfree(calldata);
2981 out:
2982 nfs4_put_open_state(state);
2983 nfs4_put_state_owner(sp);
2984 return status;
2985 }
2986
2987 static struct inode *
2988 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2989 int open_flags, struct iattr *attr, int *opened)
2990 {
2991 struct nfs4_state *state;
2992 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2993
2994 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2995
2996 /* Protect against concurrent sillydeletes */
2997 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2998
2999 nfs4_label_release_security(label);
3000
3001 if (IS_ERR(state))
3002 return ERR_CAST(state);
3003 return state->inode;
3004 }
3005
3006 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3007 {
3008 if (ctx->state == NULL)
3009 return;
3010 if (is_sync)
3011 nfs4_close_sync(ctx->state, ctx->mode);
3012 else
3013 nfs4_close_state(ctx->state, ctx->mode);
3014 }
3015
3016 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3017 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3018 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
3019
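/*
 * Query the attributes the server supports and translate the reply into
 * NFS_CAP_* flags plus the cached attribute bitmasks used by later calls.
 */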
3020 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3021 {
3022 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3023 struct nfs4_server_caps_arg args = {
3024 .fhandle = fhandle,
3025 .bitmask = bitmask,
3026 };
3027 struct nfs4_server_caps_res res = {};
3028 struct rpc_message msg = {
3029 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3030 .rpc_argp = &args,
3031 .rpc_resp = &res,
3032 };
3033 int status;
3034
3035 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3036 FATTR4_WORD0_FH_EXPIRE_TYPE |
3037 FATTR4_WORD0_LINK_SUPPORT |
3038 FATTR4_WORD0_SYMLINK_SUPPORT |
3039 FATTR4_WORD0_ACLSUPPORT;
3040 if (minorversion)
3041 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3042
3043 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3044 if (status == 0) {
3045 /* Sanity check the server answers */
3046 switch (minorversion) {
3047 case 0:
3048 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3049 res.attr_bitmask[2] = 0;
3050 break;
3051 case 1:
3052 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3053 break;
3054 case 2:
3055 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3056 }
3057 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3058 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3059 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3060 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3061 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3062 NFS_CAP_CTIME|NFS_CAP_MTIME|
3063 NFS_CAP_SECURITY_LABEL);
3064 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3065 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3066 server->caps |= NFS_CAP_ACLS;
3067 if (res.has_links != 0)
3068 server->caps |= NFS_CAP_HARDLINKS;
3069 if (res.has_symlinks != 0)
3070 server->caps |= NFS_CAP_SYMLINKS;
3071 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3072 server->caps |= NFS_CAP_FILEID;
3073 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3074 server->caps |= NFS_CAP_MODE;
3075 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3076 server->caps |= NFS_CAP_NLINK;
3077 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3078 server->caps |= NFS_CAP_OWNER;
3079 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3080 server->caps |= NFS_CAP_OWNER_GROUP;
3081 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3082 server->caps |= NFS_CAP_ATIME;
3083 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3084 server->caps |= NFS_CAP_CTIME;
3085 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3086 server->caps |= NFS_CAP_MTIME;
3087 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3088 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3089 server->caps |= NFS_CAP_SECURITY_LABEL;
3090 #endif
3091 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3092 sizeof(server->attr_bitmask));
3093 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3094
3095 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3096 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3097 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3098 server->cache_consistency_bitmask[2] = 0;
3099 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3100 sizeof(server->exclcreat_bitmask));
3101 server->acl_bitmask = res.acl_bitmask;
3102 server->fh_expire_type = res.fh_expire_type;
3103 }
3104
3105 return status;
3106 }
3107
3108 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3109 {
3110 struct nfs4_exception exception = { };
3111 int err;
3112 do {
3113 err = nfs4_handle_exception(server,
3114 _nfs4_server_capabilities(server, fhandle),
3115 &exception);
3116 } while (exception.retry);
3117 return err;
3118 }
3119
3120 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3121 struct nfs_fsinfo *info)
3122 {
3123 u32 bitmask[3];
3124 struct nfs4_lookup_root_arg args = {
3125 .bitmask = bitmask,
3126 };
3127 struct nfs4_lookup_res res = {
3128 .server = server,
3129 .fattr = info->fattr,
3130 .fh = fhandle,
3131 };
3132 struct rpc_message msg = {
3133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3134 .rpc_argp = &args,
3135 .rpc_resp = &res,
3136 };
3137
3138 bitmask[0] = nfs4_fattr_bitmap[0];
3139 bitmask[1] = nfs4_fattr_bitmap[1];
3140 /*
3141 * The security label is fetched by a later getfattr, so don't request it here
3142 */
3143 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3144
3145 nfs_fattr_init(info->fattr);
3146 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3147 }
3148
3149 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3150 struct nfs_fsinfo *info)
3151 {
3152 struct nfs4_exception exception = { };
3153 int err;
3154 do {
3155 err = _nfs4_lookup_root(server, fhandle, info);
3156 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3157 switch (err) {
3158 case 0:
3159 case -NFS4ERR_WRONGSEC:
3160 goto out;
3161 default:
3162 err = nfs4_handle_exception(server, err, &exception);
3163 }
3164 } while (exception.retry);
3165 out:
3166 return err;
3167 }
3168
3169 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3170 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3171 {
3172 struct rpc_auth_create_args auth_args = {
3173 .pseudoflavor = flavor,
3174 };
3175 struct rpc_auth *auth;
3176 int ret;
3177
3178 auth = rpcauth_create(&auth_args, server->client);
3179 if (IS_ERR(auth)) {
3180 ret = -EACCES;
3181 goto out;
3182 }
3183 ret = nfs4_lookup_root(server, fhandle, info);
3184 out:
3185 return ret;
3186 }
3187
3188 /*
3189 * Retry pseudoroot lookup with various security flavors. We do this when:
3190 *
3191 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3192 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3193 *
3194 * Returns zero on success, or a negative NFS4ERR value, or a
3195 * negative errno value.
3196 */
3197 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3198 struct nfs_fsinfo *info)
3199 {
3200 /* Per 3530bis 15.33.5 */
3201 static const rpc_authflavor_t flav_array[] = {
3202 RPC_AUTH_GSS_KRB5P,
3203 RPC_AUTH_GSS_KRB5I,
3204 RPC_AUTH_GSS_KRB5,
3205 RPC_AUTH_UNIX, /* courtesy */
3206 RPC_AUTH_NULL,
3207 };
3208 int status = -EPERM;
3209 size_t i;
3210
3211 if (server->auth_info.flavor_len > 0) {
3212 /* try each flavor specified by user */
3213 for (i = 0; i < server->auth_info.flavor_len; i++) {
3214 status = nfs4_lookup_root_sec(server, fhandle, info,
3215 server->auth_info.flavors[i]);
3216 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3217 continue;
3218 break;
3219 }
3220 } else {
3221 /* no flavors specified by user, try default list */
3222 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3223 status = nfs4_lookup_root_sec(server, fhandle, info,
3224 flav_array[i]);
3225 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3226 continue;
3227 break;
3228 }
3229 }
3230
3231 /*
3232 * -EACCES could mean that the user doesn't have the correct permissions
3233 * to access the mount. It could also mean that we tried to mount
3234 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3235 * existing mount programs don't handle -EACCES very well so it should
3236 * be mapped to -EPERM instead.
3237 */
3238 if (status == -EACCES)
3239 status = -EPERM;
3240 return status;
3241 }
3242
3243 static int nfs4_do_find_root_sec(struct nfs_server *server,
3244 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3245 {
3246 int mv = server->nfs_client->cl_minorversion;
3247 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3248 }
3249
3250 /**
3251 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3252 * @server: initialized nfs_server handle
3253 * @fhandle: we fill in the pseudo-fs root file handle
3254 * @info: we fill in an FSINFO struct
3255 * @auth_probe: probe the auth flavours
3256 *
3257 * Returns zero on success, or a negative errno.
3258 */
3259 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3260 struct nfs_fsinfo *info,
3261 bool auth_probe)
3262 {
3263 int status = 0;
3264
3265 if (!auth_probe)
3266 status = nfs4_lookup_root(server, fhandle, info);
3267
3268 if (auth_probe || status == -NFS4ERR_WRONGSEC)
3269 status = nfs4_do_find_root_sec(server, fhandle, info);
3270
3271 if (status == 0)
3272 status = nfs4_server_capabilities(server, fhandle);
3273 if (status == 0)
3274 status = nfs4_do_fsinfo(server, fhandle, info);
3275
3276 return nfs4_map_errors(status);
3277 }
3278
3279 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3280 struct nfs_fsinfo *info)
3281 {
3282 int error;
3283 struct nfs_fattr *fattr = info->fattr;
3284 struct nfs4_label *label = NULL;
3285
3286 error = nfs4_server_capabilities(server, mntfh);
3287 if (error < 0) {
3288 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3289 return error;
3290 }
3291
3292 label = nfs4_label_alloc(server, GFP_KERNEL);
3293 if (IS_ERR(label))
3294 return PTR_ERR(label);
3295
3296 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3297 if (error < 0) {
3298 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3299 goto err_free_label;
3300 }
3301
3302 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3303 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3304 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3305
3306 err_free_label:
3307 nfs4_label_free(label);
3308
3309 return error;
3310 }
3311
3312 /*
3313 * Get locations and (maybe) other attributes of a referral.
3314 * Note that we'll actually follow the referral later when
3315 * we detect fsid mismatch in inode revalidation
3316 */
3317 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3318 const struct qstr *name, struct nfs_fattr *fattr,
3319 struct nfs_fh *fhandle)
3320 {
3321 int status = -ENOMEM;
3322 struct page *page = NULL;
3323 struct nfs4_fs_locations *locations = NULL;
3324
3325 page = alloc_page(GFP_KERNEL);
3326 if (page == NULL)
3327 goto out;
3328 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3329 if (locations == NULL)
3330 goto out;
3331
3332 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3333 if (status != 0)
3334 goto out;
3335
3336 /*
3337 * If the fsid didn't change, this is a migration event, not a
3338 * referral. Cause us to drop into the exception handler, which
3339 * will kick off migration recovery.
3340 */
3341 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3342 dprintk("%s: server did not return a different fsid for"
3343 " a referral at %s\n", __func__, name->name);
3344 status = -NFS4ERR_MOVED;
3345 goto out;
3346 }
3347 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3348 nfs_fixup_referral_attributes(&locations->fattr);
3349
3350 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3351 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3352 memset(fhandle, 0, sizeof(struct nfs_fh));
3353 out:
3354 if (page)
3355 __free_page(page);
3356 kfree(locations);
3357 return status;
3358 }
3359
3360 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3361 struct nfs_fattr *fattr, struct nfs4_label *label)
3362 {
3363 struct nfs4_getattr_arg args = {
3364 .fh = fhandle,
3365 .bitmask = server->attr_bitmask,
3366 };
3367 struct nfs4_getattr_res res = {
3368 .fattr = fattr,
3369 .label = label,
3370 .server = server,
3371 };
3372 struct rpc_message msg = {
3373 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3374 .rpc_argp = &args,
3375 .rpc_resp = &res,
3376 };
3377
3378 args.bitmask = nfs4_bitmask(server, label);
3379
3380 nfs_fattr_init(fattr);
3381 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3382 }
3383
3384 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3385 struct nfs_fattr *fattr, struct nfs4_label *label)
3386 {
3387 struct nfs4_exception exception = { };
3388 int err;
3389 do {
3390 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3391 trace_nfs4_getattr(server, fhandle, fattr, err);
3392 err = nfs4_handle_exception(server, err,
3393 &exception);
3394 } while (exception.retry);
3395 return err;
3396 }
3397
3398 /*
3399 * The file is not closed if it is opened due to a request to change
3400 * the size of the file. The open call will not be needed once the
3401 * VFS layer lookup-intents are implemented.
3402 *
3403 * Close is called when the inode is destroyed.
3404 * If we haven't opened the file for O_WRONLY, we
3405 * need to do so in the size_change case to obtain a stateid.
3406 *
3407 * Got race?
3408 * Because OPEN is always done by name in nfsv4, it is
3409 * possible that we opened a different file by the same
3410 * name. We can recognize this race condition, but we
3411 * can't do anything about it besides returning an error.
3412 *
3413 * This will be fixed with VFS changes (lookup-intent).
3414 */
3415 static int
3416 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3417 struct iattr *sattr)
3418 {
3419 struct inode *inode = d_inode(dentry);
3420 struct rpc_cred *cred = NULL;
3421 struct nfs4_state *state = NULL;
3422 struct nfs4_label *label = NULL;
3423 int status;
3424
3425 if (pnfs_ld_layoutret_on_setattr(inode) &&
3426 sattr->ia_valid & ATTR_SIZE &&
3427 sattr->ia_size < i_size_read(inode))
3428 pnfs_commit_and_return_layout(inode);
3429
3430 nfs_fattr_init(fattr);
3431
3432 /* Deal with open(O_TRUNC) */
3433 if (sattr->ia_valid & ATTR_OPEN)
3434 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3435
3436 /* Optimization: if the end result is no change, don't RPC */
3437 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3438 return 0;
3439
3440 /* Search for an existing open(O_WRITE) file */
3441 if (sattr->ia_valid & ATTR_FILE) {
3442 struct nfs_open_context *ctx;
3443
3444 ctx = nfs_file_open_context(sattr->ia_file);
3445 if (ctx) {
3446 cred = ctx->cred;
3447 state = ctx->state;
3448 }
3449 }
3450
3451 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3452 if (IS_ERR(label))
3453 return PTR_ERR(label);
3454
3455 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3456 if (status == 0) {
3457 nfs_setattr_update_inode(inode, sattr, fattr);
3458 nfs_setsecurity(inode, fattr, label);
3459 }
3460 nfs4_label_free(label);
3461 return status;
3462 }
3463
3464 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3465 const struct qstr *name, struct nfs_fh *fhandle,
3466 struct nfs_fattr *fattr, struct nfs4_label *label)
3467 {
3468 struct nfs_server *server = NFS_SERVER(dir);
3469 int status;
3470 struct nfs4_lookup_arg args = {
3471 .bitmask = server->attr_bitmask,
3472 .dir_fh = NFS_FH(dir),
3473 .name = name,
3474 };
3475 struct nfs4_lookup_res res = {
3476 .server = server,
3477 .fattr = fattr,
3478 .label = label,
3479 .fh = fhandle,
3480 };
3481 struct rpc_message msg = {
3482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3483 .rpc_argp = &args,
3484 .rpc_resp = &res,
3485 };
3486
3487 args.bitmask = nfs4_bitmask(server, label);
3488
3489 nfs_fattr_init(fattr);
3490
3491 dprintk("NFS call lookup %s\n", name->name);
3492 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3493 dprintk("NFS reply lookup: %d\n", status);
3494 return status;
3495 }
3496
3497 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3498 {
3499 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3500 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3501 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3502 fattr->nlink = 2;
3503 }
3504
3505 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3506 struct qstr *name, struct nfs_fh *fhandle,
3507 struct nfs_fattr *fattr, struct nfs4_label *label)
3508 {
3509 struct nfs4_exception exception = { };
3510 struct rpc_clnt *client = *clnt;
3511 int err;
3512 do {
3513 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3514 trace_nfs4_lookup(dir, name, err);
3515 switch (err) {
3516 case -NFS4ERR_BADNAME:
3517 err = -ENOENT;
3518 goto out;
3519 case -NFS4ERR_MOVED:
3520 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3521 if (err == -NFS4ERR_MOVED)
3522 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3523 goto out;
3524 case -NFS4ERR_WRONGSEC:
3525 err = -EPERM;
3526 if (client != *clnt)
3527 goto out;
3528 client = nfs4_negotiate_security(client, dir, name);
3529 if (IS_ERR(client))
3530 return PTR_ERR(client);
3531
3532 exception.retry = 1;
3533 break;
3534 default:
3535 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3536 }
3537 } while (exception.retry);
3538
3539 out:
3540 if (err == 0)
3541 *clnt = client;
3542 else if (client != *clnt)
3543 rpc_shutdown_client(client);
3544
3545 return err;
3546 }
3547
3548 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3549 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3550 struct nfs4_label *label)
3551 {
3552 int status;
3553 struct rpc_clnt *client = NFS_CLIENT(dir);
3554
3555 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3556 if (client != NFS_CLIENT(dir)) {
3557 rpc_shutdown_client(client);
3558 nfs_fixup_secinfo_attributes(fattr);
3559 }
3560 return status;
3561 }
3562
3563 struct rpc_clnt *
3564 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3565 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3566 {
3567 struct rpc_clnt *client = NFS_CLIENT(dir);
3568 int status;
3569
3570 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3571 if (status < 0)
3572 return ERR_PTR(status);
3573 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3574 }
3575
3576 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3577 {
3578 struct nfs_server *server = NFS_SERVER(inode);
3579 struct nfs4_accessargs args = {
3580 .fh = NFS_FH(inode),
3581 .bitmask = server->cache_consistency_bitmask,
3582 };
3583 struct nfs4_accessres res = {
3584 .server = server,
3585 };
3586 struct rpc_message msg = {
3587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3588 .rpc_argp = &args,
3589 .rpc_resp = &res,
3590 .rpc_cred = entry->cred,
3591 };
3592 int mode = entry->mask;
3593 int status = 0;
3594
3595 /*
3596 * Determine which access bits we want to ask for...
3597 */
3598 if (mode & MAY_READ)
3599 args.access |= NFS4_ACCESS_READ;
3600 if (S_ISDIR(inode->i_mode)) {
3601 if (mode & MAY_WRITE)
3602 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3603 if (mode & MAY_EXEC)
3604 args.access |= NFS4_ACCESS_LOOKUP;
3605 } else {
3606 if (mode & MAY_WRITE)
3607 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3608 if (mode & MAY_EXEC)
3609 args.access |= NFS4_ACCESS_EXECUTE;
3610 }
3611
3612 res.fattr = nfs_alloc_fattr();
3613 if (res.fattr == NULL)
3614 return -ENOMEM;
3615
3616 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3617 if (!status) {
3618 nfs_access_set_mask(entry, res.access);
3619 nfs_refresh_inode(inode, res.fattr);
3620 }
3621 nfs_free_fattr(res.fattr);
3622 return status;
3623 }
3624
3625 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3626 {
3627 struct nfs4_exception exception = { };
3628 int err;
3629 do {
3630 err = _nfs4_proc_access(inode, entry);
3631 trace_nfs4_access(inode, err);
3632 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3633 &exception);
3634 } while (exception.retry);
3635 return err;
3636 }
3637
3638 /*
3639 * TODO: For the time being, we don't try to get any attributes
3640 * along with any of the zero-copy operations READ, READDIR,
3641 * READLINK, WRITE.
3642 *
3643 * In the case of the first three, we want to put the GETATTR
3644 * after the read-type operation -- this is because it is hard
3645 * to predict the length of a GETATTR response in v4, and thus
3646 * align the READ data correctly. This means that the GETATTR
3647 * may end up partially falling into the page cache, and we should
3648 * shift it into the 'tail' of the xdr_buf before processing.
3649 * To do this efficiently, we need to know the total length
3650 * of data received, which doesn't seem to be available outside
3651 * of the RPC layer.
3652 *
3653 * In the case of WRITE, we also want to put the GETATTR after
3654 * the operation -- in this case because we want to make sure
3655 * we get the post-operation mtime and size.
3656 *
3657 * Both of these changes to the XDR layer would in fact be quite
3658 * minor, but I decided to leave them for a subsequent patch.
3659 */
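/*
 * Illustration of the trade-off described above: a cache-consistent
 * read would be the compound { PUTFH, READ, GETATTR }.  With GETATTR
 * last, its reply can spill into the pages reserved for the READ data
 * and would have to be shifted into the xdr_buf tail; with GETATTR
 * first, its unpredictable length would misalign the READ data.  So
 * today the read compound is simply { PUTFH, READ } (preceded by
 * SEQUENCE on v4.1; the encoding itself lives in nfs4xdr.c) and
 * attributes are handled separately.
 */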
3660 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3661 unsigned int pgbase, unsigned int pglen)
3662 {
3663 struct nfs4_readlink args = {
3664 .fh = NFS_FH(inode),
3665 .pgbase = pgbase,
3666 .pglen = pglen,
3667 .pages = &page,
3668 };
3669 struct nfs4_readlink_res res;
3670 struct rpc_message msg = {
3671 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3672 .rpc_argp = &args,
3673 .rpc_resp = &res,
3674 };
3675
3676 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3677 }
3678
3679 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3680 unsigned int pgbase, unsigned int pglen)
3681 {
3682 struct nfs4_exception exception = { };
3683 int err;
3684 do {
3685 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3686 trace_nfs4_readlink(inode, err);
3687 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3688 &exception);
3689 } while (exception.retry);
3690 return err;
3691 }
3692
3693 /*
3694 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3695 */
3696 static int
3697 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3698 int flags)
3699 {
3700 struct nfs4_label l, *ilabel = NULL;
3701 struct nfs_open_context *ctx;
3702 struct nfs4_state *state;
3703 int status = 0;
3704
3705 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3706 if (IS_ERR(ctx))
3707 return PTR_ERR(ctx);
3708
3709 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3710
3711 sattr->ia_mode &= ~current_umask();
3712 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3713 if (IS_ERR(state)) {
3714 status = PTR_ERR(state);
3715 goto out;
3716 }
3717 out:
3718 nfs4_label_release_security(ilabel);
3719 put_nfs_open_context(ctx);
3720 return status;
3721 }
3722
3723 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3724 {
3725 struct nfs_server *server = NFS_SERVER(dir);
3726 struct nfs_removeargs args = {
3727 .fh = NFS_FH(dir),
3728 .name = *name,
3729 };
3730 struct nfs_removeres res = {
3731 .server = server,
3732 };
3733 struct rpc_message msg = {
3734 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3735 .rpc_argp = &args,
3736 .rpc_resp = &res,
3737 };
3738 int status;
3739
3740 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3741 if (status == 0)
3742 update_changeattr(dir, &res.cinfo);
3743 return status;
3744 }
3745
3746 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3747 {
3748 struct nfs4_exception exception = { };
3749 int err;
3750 do {
3751 err = _nfs4_proc_remove(dir, name);
3752 trace_nfs4_remove(dir, name, err);
3753 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3754 &exception);
3755 } while (exception.retry);
3756 return err;
3757 }
3758
3759 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3760 {
3761 struct nfs_server *server = NFS_SERVER(dir);
3762 struct nfs_removeargs *args = msg->rpc_argp;
3763 struct nfs_removeres *res = msg->rpc_resp;
3764
3765 res->server = server;
3766 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3767 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3768
3769 nfs_fattr_init(res->dir_attr);
3770 }
3771
3772 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3773 {
3774 nfs4_setup_sequence(NFS_SERVER(data->dir),
3775 &data->args.seq_args,
3776 &data->res.seq_res,
3777 task);
3778 }
3779
3780 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3781 {
3782 struct nfs_unlinkdata *data = task->tk_calldata;
3783 struct nfs_removeres *res = &data->res;
3784
3785 if (!nfs4_sequence_done(task, &res->seq_res))
3786 return 0;
3787 if (nfs4_async_handle_error(task, res->server, NULL,
3788 &data->timeout) == -EAGAIN)
3789 return 0;
3790 update_changeattr(dir, &res->cinfo);
3791 return 1;
3792 }
3793
3794 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3795 {
3796 struct nfs_server *server = NFS_SERVER(dir);
3797 struct nfs_renameargs *arg = msg->rpc_argp;
3798 struct nfs_renameres *res = msg->rpc_resp;
3799
3800 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3801 res->server = server;
3802 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3803 }
3804
3805 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3806 {
3807 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3808 &data->args.seq_args,
3809 &data->res.seq_res,
3810 task);
3811 }
3812
3813 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3814 struct inode *new_dir)
3815 {
3816 struct nfs_renamedata *data = task->tk_calldata;
3817 struct nfs_renameres *res = &data->res;
3818
3819 if (!nfs4_sequence_done(task, &res->seq_res))
3820 return 0;
3821 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3822 return 0;
3823
3824 update_changeattr(old_dir, &res->old_cinfo);
3825 update_changeattr(new_dir, &res->new_cinfo);
3826 return 1;
3827 }
3828
3829 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3830 {
3831 struct nfs_server *server = NFS_SERVER(inode);
3832 struct nfs4_link_arg arg = {
3833 .fh = NFS_FH(inode),
3834 .dir_fh = NFS_FH(dir),
3835 .name = name,
3836 .bitmask = server->attr_bitmask,
3837 };
3838 struct nfs4_link_res res = {
3839 .server = server,
3840 .label = NULL,
3841 };
3842 struct rpc_message msg = {
3843 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3844 .rpc_argp = &arg,
3845 .rpc_resp = &res,
3846 };
3847 int status = -ENOMEM;
3848
3849 res.fattr = nfs_alloc_fattr();
3850 if (res.fattr == NULL)
3851 goto out;
3852
3853 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3854 if (IS_ERR(res.label)) {
3855 status = PTR_ERR(res.label);
3856 goto out;
3857 }
3858 arg.bitmask = nfs4_bitmask(server, res.label);
3859
3860 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3861 if (!status) {
3862 update_changeattr(dir, &res.cinfo);
3863 status = nfs_post_op_update_inode(inode, res.fattr);
3864 if (!status)
3865 nfs_setsecurity(inode, res.fattr, res.label);
3866 }
3867
3868
3869 nfs4_label_free(res.label);
3870
3871 out:
3872 nfs_free_fattr(res.fattr);
3873 return status;
3874 }
3875
3876 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3877 {
3878 struct nfs4_exception exception = { };
3879 int err;
3880 do {
3881 err = nfs4_handle_exception(NFS_SERVER(inode),
3882 _nfs4_proc_link(inode, dir, name),
3883 &exception);
3884 } while (exception.retry);
3885 return err;
3886 }
3887
3888 struct nfs4_createdata {
3889 struct rpc_message msg;
3890 struct nfs4_create_arg arg;
3891 struct nfs4_create_res res;
3892 struct nfs_fh fh;
3893 struct nfs_fattr fattr;
3894 struct nfs4_label *label;
3895 };
3896
3897 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3898 struct qstr *name, struct iattr *sattr, u32 ftype)
3899 {
3900 struct nfs4_createdata *data;
3901
3902 data = kzalloc(sizeof(*data), GFP_KERNEL);
3903 if (data != NULL) {
3904 struct nfs_server *server = NFS_SERVER(dir);
3905
3906 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3907 if (IS_ERR(data->label))
3908 goto out_free;
3909
3910 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3911 data->msg.rpc_argp = &data->arg;
3912 data->msg.rpc_resp = &data->res;
3913 data->arg.dir_fh = NFS_FH(dir);
3914 data->arg.server = server;
3915 data->arg.name = name;
3916 data->arg.attrs = sattr;
3917 data->arg.ftype = ftype;
3918 data->arg.bitmask = nfs4_bitmask(server, data->label);
3919 data->res.server = server;
3920 data->res.fh = &data->fh;
3921 data->res.fattr = &data->fattr;
3922 data->res.label = data->label;
3923 nfs_fattr_init(data->res.fattr);
3924 }
3925 return data;
3926 out_free:
3927 kfree(data);
3928 return NULL;
3929 }
3930
3931 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3932 {
3933 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3934 &data->arg.seq_args, &data->res.seq_res, 1);
3935 if (status == 0) {
3936 update_changeattr(dir, &data->res.dir_cinfo);
3937 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3938 }
3939 return status;
3940 }
3941
3942 static void nfs4_free_createdata(struct nfs4_createdata *data)
3943 {
3944 nfs4_label_free(data->label);
3945 kfree(data);
3946 }
3947
3948 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3949 struct page *page, unsigned int len, struct iattr *sattr,
3950 struct nfs4_label *label)
3951 {
3952 struct nfs4_createdata *data;
3953 int status = -ENAMETOOLONG;
3954
3955 if (len > NFS4_MAXPATHLEN)
3956 goto out;
3957
3958 status = -ENOMEM;
3959 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3960 if (data == NULL)
3961 goto out;
3962
3963 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3964 data->arg.u.symlink.pages = &page;
3965 data->arg.u.symlink.len = len;
3966 data->arg.label = label;
3967
3968 status = nfs4_do_create(dir, dentry, data);
3969
3970 nfs4_free_createdata(data);
3971 out:
3972 return status;
3973 }
3974
3975 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3976 struct page *page, unsigned int len, struct iattr *sattr)
3977 {
3978 struct nfs4_exception exception = { };
3979 struct nfs4_label l, *label = NULL;
3980 int err;
3981
3982 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3983
3984 do {
3985 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3986 trace_nfs4_symlink(dir, &dentry->d_name, err);
3987 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3988 &exception);
3989 } while (exception.retry);
3990
3991 nfs4_label_release_security(label);
3992 return err;
3993 }
3994
3995 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3996 struct iattr *sattr, struct nfs4_label *label)
3997 {
3998 struct nfs4_createdata *data;
3999 int status = -ENOMEM;
4000
4001 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4002 if (data == NULL)
4003 goto out;
4004
4005 data->arg.label = label;
4006 status = nfs4_do_create(dir, dentry, data);
4007
4008 nfs4_free_createdata(data);
4009 out:
4010 return status;
4011 }
4012
4013 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4014 struct iattr *sattr)
4015 {
4016 struct nfs4_exception exception = { };
4017 struct nfs4_label l, *label = NULL;
4018 int err;
4019
4020 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4021
4022 sattr->ia_mode &= ~current_umask();
4023 do {
4024 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4025 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4026 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4027 &exception);
4028 } while (exception.retry);
4029 nfs4_label_release_security(label);
4030
4031 return err;
4032 }
4033
4034 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4035 u64 cookie, struct page **pages, unsigned int count, int plus)
4036 {
4037 struct inode *dir = d_inode(dentry);
4038 struct nfs4_readdir_arg args = {
4039 .fh = NFS_FH(dir),
4040 .pages = pages,
4041 .pgbase = 0,
4042 .count = count,
4043 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4044 .plus = plus,
4045 };
4046 struct nfs4_readdir_res res;
4047 struct rpc_message msg = {
4048 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4049 .rpc_argp = &args,
4050 .rpc_resp = &res,
4051 .rpc_cred = cred,
4052 };
4053 int status;
4054
4055 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4056 dentry,
4057 (unsigned long long)cookie);
4058 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4059 res.pgbase = args.pgbase;
4060 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4061 if (status >= 0) {
4062 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4063 status += args.pgbase;
4064 }
4065
4066 nfs_invalidate_atime(dir);
4067
4068 dprintk("%s: returns %d\n", __func__, status);
4069 return status;
4070 }
4071
4072 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4073 u64 cookie, struct page **pages, unsigned int count, int plus)
4074 {
4075 struct nfs4_exception exception = { };
4076 int err;
4077 do {
4078 err = _nfs4_proc_readdir(dentry, cred, cookie,
4079 pages, count, plus);
4080 trace_nfs4_readdir(d_inode(dentry), err);
4081 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4082 &exception);
4083 } while (exception.retry);
4084 return err;
4085 }
4086
4087 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4088 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4089 {
4090 struct nfs4_createdata *data;
4091 int mode = sattr->ia_mode;
4092 int status = -ENOMEM;
4093
4094 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4095 if (data == NULL)
4096 goto out;
4097
4098 if (S_ISFIFO(mode))
4099 data->arg.ftype = NF4FIFO;
4100 else if (S_ISBLK(mode)) {
4101 data->arg.ftype = NF4BLK;
4102 data->arg.u.device.specdata1 = MAJOR(rdev);
4103 data->arg.u.device.specdata2 = MINOR(rdev);
4104 }
4105 else if (S_ISCHR(mode)) {
4106 data->arg.ftype = NF4CHR;
4107 data->arg.u.device.specdata1 = MAJOR(rdev);
4108 data->arg.u.device.specdata2 = MINOR(rdev);
4109 } else if (!S_ISSOCK(mode)) {
4110 status = -EINVAL;
4111 goto out_free;
4112 }
4113
4114 data->arg.label = label;
4115 status = nfs4_do_create(dir, dentry, data);
4116 out_free:
4117 nfs4_free_createdata(data);
4118 out:
4119 return status;
4120 }
4121
4122 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4123 struct iattr *sattr, dev_t rdev)
4124 {
4125 struct nfs4_exception exception = { };
4126 struct nfs4_label l, *label = NULL;
4127 int err;
4128
4129 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4130
4131 sattr->ia_mode &= ~current_umask();
4132 do {
4133 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4134 trace_nfs4_mknod(dir, &dentry->d_name, err);
4135 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4136 &exception);
4137 } while (exception.retry);
4138
4139 nfs4_label_release_security(label);
4140
4141 return err;
4142 }
4143
4144 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4145 struct nfs_fsstat *fsstat)
4146 {
4147 struct nfs4_statfs_arg args = {
4148 .fh = fhandle,
4149 .bitmask = server->attr_bitmask,
4150 };
4151 struct nfs4_statfs_res res = {
4152 .fsstat = fsstat,
4153 };
4154 struct rpc_message msg = {
4155 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4156 .rpc_argp = &args,
4157 .rpc_resp = &res,
4158 };
4159
4160 nfs_fattr_init(fsstat->fattr);
4161 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4162 }
4163
4164 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4165 {
4166 struct nfs4_exception exception = { };
4167 int err;
4168 do {
4169 err = nfs4_handle_exception(server,
4170 _nfs4_proc_statfs(server, fhandle, fsstat),
4171 &exception);
4172 } while (exception.retry);
4173 return err;
4174 }
4175
4176 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4177 struct nfs_fsinfo *fsinfo)
4178 {
4179 struct nfs4_fsinfo_arg args = {
4180 .fh = fhandle,
4181 .bitmask = server->attr_bitmask,
4182 };
4183 struct nfs4_fsinfo_res res = {
4184 .fsinfo = fsinfo,
4185 };
4186 struct rpc_message msg = {
4187 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4188 .rpc_argp = &args,
4189 .rpc_resp = &res,
4190 };
4191
4192 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4193 }
4194
4195 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4196 {
4197 struct nfs4_exception exception = { };
4198 unsigned long now = jiffies;
4199 int err;
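/* Note that 'now' is sampled before the FSINFO call is sent, so when
 * the lease time is (re)initialised below the recorded last-renewal
 * time errs on the early, i.e. safe, side. */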
4200
4201 do {
4202 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4203 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4204 if (err == 0) {
4205 struct nfs_client *clp = server->nfs_client;
4206
4207 spin_lock(&clp->cl_lock);
4208 clp->cl_lease_time = fsinfo->lease_time * HZ;
4209 clp->cl_last_renewal = now;
4210 spin_unlock(&clp->cl_lock);
4211 break;
4212 }
4213 err = nfs4_handle_exception(server, err, &exception);
4214 } while (exception.retry);
4215 return err;
4216 }
4217
4218 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4219 {
4220 int error;
4221
4222 nfs_fattr_init(fsinfo->fattr);
4223 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4224 if (error == 0) {
4225 /* block layout checks this! */
4226 server->pnfs_blksize = fsinfo->blksize;
4227 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4228 }
4229
4230 return error;
4231 }
4232
4233 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4234 struct nfs_pathconf *pathconf)
4235 {
4236 struct nfs4_pathconf_arg args = {
4237 .fh = fhandle,
4238 .bitmask = server->attr_bitmask,
4239 };
4240 struct nfs4_pathconf_res res = {
4241 .pathconf = pathconf,
4242 };
4243 struct rpc_message msg = {
4244 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4245 .rpc_argp = &args,
4246 .rpc_resp = &res,
4247 };
4248
4249 /* None of the pathconf attributes are mandatory to implement */
4250 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4251 memset(pathconf, 0, sizeof(*pathconf));
4252 return 0;
4253 }
4254
4255 nfs_fattr_init(pathconf->fattr);
4256 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4257 }
4258
4259 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4260 struct nfs_pathconf *pathconf)
4261 {
4262 struct nfs4_exception exception = { };
4263 int err;
4264
4265 do {
4266 err = nfs4_handle_exception(server,
4267 _nfs4_proc_pathconf(server, fhandle, pathconf),
4268 &exception);
4269 } while (exception.retry);
4270 return err;
4271 }
4272
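/*
 * Copy the most specific stateid available for the I/O into 'stateid':
 * a matching byte-range lock stateid when the lock context holds one,
 * otherwise a delegation or open stateid for this open context (the
 * actual selection is done by nfs4_select_rw_stateid()).
 */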
4273 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4274 const struct nfs_open_context *ctx,
4275 const struct nfs_lock_context *l_ctx,
4276 fmode_t fmode)
4277 {
4278 const struct nfs_lockowner *lockowner = NULL;
4279
4280 if (l_ctx != NULL)
4281 lockowner = &l_ctx->lockowner;
4282 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4283 }
4284 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4285
4286 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4287 const struct nfs_open_context *ctx,
4288 const struct nfs_lock_context *l_ctx,
4289 fmode_t fmode)
4290 {
4291 nfs4_stateid current_stateid;
4292
4293 /* If the current stateid represents a lost lock, then exit */
4294 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4295 return true;
4296 return nfs4_stateid_match(stateid, &current_stateid);
4297 }
4298
4299 static bool nfs4_error_stateid_expired(int err)
4300 {
4301 switch (err) {
4302 case -NFS4ERR_DELEG_REVOKED:
4303 case -NFS4ERR_ADMIN_REVOKED:
4304 case -NFS4ERR_BAD_STATEID:
4305 case -NFS4ERR_STALE_STATEID:
4306 case -NFS4ERR_OLD_STATEID:
4307 case -NFS4ERR_OPENMODE:
4308 case -NFS4ERR_EXPIRED:
4309 return true;
4310 }
4311 return false;
4312 }
4313
4314 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4315 {
4316 nfs_invalidate_atime(hdr->inode);
4317 }
4318
4319 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4320 {
4321 struct nfs_server *server = NFS_SERVER(hdr->inode);
4322
4323 trace_nfs4_read(hdr, task->tk_status);
4324 if (nfs4_async_handle_error(task, server,
4325 hdr->args.context->state,
4326 NULL) == -EAGAIN) {
4327 rpc_restart_call_prepare(task);
4328 return -EAGAIN;
4329 }
4330
4331 __nfs4_read_done_cb(hdr);
4332 if (task->tk_status > 0)
4333 renew_lease(server, hdr->timestamp);
4334 return 0;
4335 }
4336
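/*
 * If a READ failed with a stateid-expiry error but the stateid it used
 * is no longer the current one for this open/lock context, state
 * recovery has already replaced that stateid; restarting the RPC picks
 * up the new one instead of surfacing the error.
 */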
4337 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4338 struct nfs_pgio_args *args)
4339 {
4340
4341 if (!nfs4_error_stateid_expired(task->tk_status) ||
4342 nfs4_stateid_is_current(&args->stateid,
4343 args->context,
4344 args->lock_context,
4345 FMODE_READ))
4346 return false;
4347 rpc_restart_call_prepare(task);
4348 return true;
4349 }
4350
4351 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4352 {
4353
4354 dprintk("--> %s\n", __func__);
4355
4356 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4357 return -EAGAIN;
4358 if (nfs4_read_stateid_changed(task, &hdr->args))
4359 return -EAGAIN;
4360 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4361 nfs4_read_done_cb(task, hdr);
4362 }
4363
4364 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4365 struct rpc_message *msg)
4366 {
4367 hdr->timestamp = jiffies;
4368 hdr->pgio_done_cb = nfs4_read_done_cb;
4369 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4370 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4371 }
4372
4373 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4374 struct nfs_pgio_header *hdr)
4375 {
4376 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4377 &hdr->args.seq_args,
4378 &hdr->res.seq_res,
4379 task))
4380 return 0;
4381 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4382 hdr->args.lock_context,
4383 hdr->rw_ops->rw_mode) == -EIO)
4384 return -EIO;
4385 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4386 return -EIO;
4387 return 0;
4388 }
4389
4390 static int nfs4_write_done_cb(struct rpc_task *task,
4391 struct nfs_pgio_header *hdr)
4392 {
4393 struct inode *inode = hdr->inode;
4394
4395 trace_nfs4_write(hdr, task->tk_status);
4396 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4397 hdr->args.context->state,
4398 NULL) == -EAGAIN) {
4399 rpc_restart_call_prepare(task);
4400 return -EAGAIN;
4401 }
4402 if (task->tk_status >= 0) {
4403 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4404 nfs_writeback_update_inode(hdr);
4405 }
4406 return 0;
4407 }
4408
4409 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4410 struct nfs_pgio_args *args)
4411 {
4412
4413 if (!nfs4_error_stateid_expired(task->tk_status) ||
4414 nfs4_stateid_is_current(&args->stateid,
4415 args->context,
4416 args->lock_context,
4417 FMODE_WRITE))
4418 return false;
4419 rpc_restart_call_prepare(task);
4420 return true;
4421 }
4422
4423 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4424 {
4425 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4426 return -EAGAIN;
4427 if (nfs4_write_stateid_changed(task, &hdr->args))
4428 return -EAGAIN;
4429 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4430 nfs4_write_done_cb(task, hdr);
4431 }
4432
4433 static
4434 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4435 {
4436 /* Don't request attributes for pNFS or O_DIRECT writes */
4437 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4438 return false;
4439 /* Otherwise, request attributes if and only if we don't hold
4440 * a delegation
4441 */
4442 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4443 }
4444
4445 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4446 struct rpc_message *msg)
4447 {
4448 struct nfs_server *server = NFS_SERVER(hdr->inode);
4449
4450 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4451 hdr->args.bitmask = NULL;
4452 hdr->res.fattr = NULL;
4453 } else
4454 hdr->args.bitmask = server->cache_consistency_bitmask;
4455
4456 if (!hdr->pgio_done_cb)
4457 hdr->pgio_done_cb = nfs4_write_done_cb;
4458 hdr->res.server = server;
4459 hdr->timestamp = jiffies;
4460
4461 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4462 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4463 }
4464
4465 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4466 {
4467 nfs4_setup_sequence(NFS_SERVER(data->inode),
4468 &data->args.seq_args,
4469 &data->res.seq_res,
4470 task);
4471 }
4472
4473 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4474 {
4475 struct inode *inode = data->inode;
4476
4477 trace_nfs4_commit(data, task->tk_status);
4478 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4479 NULL, NULL) == -EAGAIN) {
4480 rpc_restart_call_prepare(task);
4481 return -EAGAIN;
4482 }
4483 return 0;
4484 }
4485
4486 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4487 {
4488 if (!nfs4_sequence_done(task, &data->res.seq_res))
4489 return -EAGAIN;
4490 return data->commit_done_cb(task, data);
4491 }
4492
4493 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4494 {
4495 struct nfs_server *server = NFS_SERVER(data->inode);
4496
4497 if (data->commit_done_cb == NULL)
4498 data->commit_done_cb = nfs4_commit_done_cb;
4499 data->res.server = server;
4500 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4501 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4502 }
4503
4504 struct nfs4_renewdata {
4505 struct nfs_client *client;
4506 unsigned long timestamp;
4507 };
4508
4509 /*
4510 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4511 * standalone procedure for queueing an asynchronous RENEW.
4512 */
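/*
 * The renewal is fire-and-forget: nfs4_renew_done() updates the local
 * lease record or kicks off state recovery as required, and
 * nfs4_renew_release() re-arms the renewd timer via
 * nfs4_schedule_state_renewal() before dropping the client reference.
 */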
4513 static void nfs4_renew_release(void *calldata)
4514 {
4515 struct nfs4_renewdata *data = calldata;
4516 struct nfs_client *clp = data->client;
4517
4518 if (atomic_read(&clp->cl_count) > 1)
4519 nfs4_schedule_state_renewal(clp);
4520 nfs_put_client(clp);
4521 kfree(data);
4522 }
4523
4524 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4525 {
4526 struct nfs4_renewdata *data = calldata;
4527 struct nfs_client *clp = data->client;
4528 unsigned long timestamp = data->timestamp;
4529
4530 trace_nfs4_renew_async(clp, task->tk_status);
4531 switch (task->tk_status) {
4532 case 0:
4533 break;
4534 case -NFS4ERR_LEASE_MOVED:
4535 nfs4_schedule_lease_moved_recovery(clp);
4536 break;
4537 default:
4538 /* Unless we're shutting down, schedule state recovery! */
4539 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4540 return;
4541 if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
4542 nfs4_schedule_lease_recovery(clp);
4543 return;
4544 }
4545 nfs4_schedule_path_down_recovery(clp);
4546 }
4547 do_renew_lease(clp, timestamp);
4548 }
4549
4550 static const struct rpc_call_ops nfs4_renew_ops = {
4551 .rpc_call_done = nfs4_renew_done,
4552 .rpc_release = nfs4_renew_release,
4553 };
4554
4555 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4556 {
4557 struct rpc_message msg = {
4558 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4559 .rpc_argp = clp,
4560 .rpc_cred = cred,
4561 };
4562 struct nfs4_renewdata *data;
4563
4564 if (renew_flags == 0)
4565 return 0;
4566 if (!atomic_inc_not_zero(&clp->cl_count))
4567 return -EIO;
4568 data = kmalloc(sizeof(*data), GFP_NOFS);
4569 if (data == NULL)
4570 return -ENOMEM;
4571 data->client = clp;
4572 data->timestamp = jiffies;
4573 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4574 &nfs4_renew_ops, data);
4575 }
4576
4577 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4578 {
4579 struct rpc_message msg = {
4580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4581 .rpc_argp = clp,
4582 .rpc_cred = cred,
4583 };
4584 unsigned long now = jiffies;
4585 int status;
4586
4587 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4588 if (status < 0)
4589 return status;
4590 do_renew_lease(clp, now);
4591 return 0;
4592 }
4593
4594 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4595 {
4596 return server->caps & NFS_CAP_ACLS;
4597 }
4598
4599 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4600 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4601 * the stack.
4602 */
4603 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
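/* Worked example, assuming the usual XATTR_SIZE_MAX of 65536 and a
 * 4 KiB PAGE_SIZE: NFS4ACL_MAXPAGES is 16, so the on-stack page arrays
 * below hold 16 pointers, i.e. 128 bytes on a 64-bit build. */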
4604
4605 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4606 struct page **pages)
4607 {
4608 struct page *newpage, **spages;
4609 int rc = 0;
4610 size_t len;
4611 spages = pages;
4612
4613 do {
4614 len = min_t(size_t, PAGE_SIZE, buflen);
4615 newpage = alloc_page(GFP_KERNEL);
4616
4617 if (newpage == NULL)
4618 goto unwind;
4619 memcpy(page_address(newpage), buf, len);
4620 buf += len;
4621 buflen -= len;
4622 *pages++ = newpage;
4623 rc++;
4624 } while (buflen != 0);
4625
4626 return rc;
4627
4628 unwind:
4629 for(; rc > 0; rc--)
4630 __free_page(spages[rc-1]);
4631 return -ENOMEM;
4632 }
4633
4634 struct nfs4_cached_acl {
4635 int cached;
4636 size_t len;
4637 char data[0];
4638 };
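/* cached == 1 means 'data' holds the ACL itself; cached == 0 means the
 * ACL was too big to keep (only 'len' is valid), so reads of the data
 * must go back to the server -- see nfs4_write_cached_acl(). */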
4639
4640 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4641 {
4642 struct nfs_inode *nfsi = NFS_I(inode);
4643
4644 spin_lock(&inode->i_lock);
4645 kfree(nfsi->nfs4_acl);
4646 nfsi->nfs4_acl = acl;
4647 spin_unlock(&inode->i_lock);
4648 }
4649
4650 static void nfs4_zap_acl_attr(struct inode *inode)
4651 {
4652 nfs4_set_cached_acl(inode, NULL);
4653 }
4654
4655 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4656 {
4657 struct nfs_inode *nfsi = NFS_I(inode);
4658 struct nfs4_cached_acl *acl;
4659 int ret = -ENOENT;
4660
4661 spin_lock(&inode->i_lock);
4662 acl = nfsi->nfs4_acl;
4663 if (acl == NULL)
4664 goto out;
4665 if (buf == NULL) /* user is just asking for length */
4666 goto out_len;
4667 if (acl->cached == 0)
4668 goto out;
4669 ret = -ERANGE; /* see getxattr(2) man page */
4670 if (acl->len > buflen)
4671 goto out;
4672 memcpy(buf, acl->data, acl->len);
4673 out_len:
4674 ret = acl->len;
4675 out:
4676 spin_unlock(&inode->i_lock);
4677 return ret;
4678 }
4679
4680 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4681 {
4682 struct nfs4_cached_acl *acl;
4683 size_t buflen = sizeof(*acl) + acl_len;
4684
4685 if (buflen <= PAGE_SIZE) {
4686 acl = kmalloc(buflen, GFP_KERNEL);
4687 if (acl == NULL)
4688 goto out;
4689 acl->cached = 1;
4690 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4691 } else {
4692 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4693 if (acl == NULL)
4694 goto out;
4695 acl->cached = 0;
4696 }
4697 acl->len = acl_len;
4698 out:
4699 nfs4_set_cached_acl(inode, acl);
4700 }
4701
4702 /*
4703 * The getxattr API returns the required buffer length when called with a
4704 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4705 * the required buf. On a NULL buf, we ask the server for a page of ACL data,
4706 * guessing that the request can be serviced by a single page. If so, we cache
4707 * up to that page of ACL data, and the 2nd call to getxattr is serviced by
4708 * the cache. If not, we throw away the page and cache only the required
4709 * length. The next getxattr call will then produce another round trip to
4710 * the server, this time with the input buf of the required size.
4711 */
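/* Illustrative userspace sequence this is tuned for (hypothetical path,
 * sizes as returned by the server):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);   // length only
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);         // served from the
 *	                                                     // cache when the ACL
 *	                                                     // fit in one page
 */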
4712 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4713 {
4714 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4715 struct nfs_getaclargs args = {
4716 .fh = NFS_FH(inode),
4717 .acl_pages = pages,
4718 .acl_len = buflen,
4719 };
4720 struct nfs_getaclres res = {
4721 .acl_len = buflen,
4722 };
4723 struct rpc_message msg = {
4724 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4725 .rpc_argp = &args,
4726 .rpc_resp = &res,
4727 };
4728 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4729 int ret = -ENOMEM, i;
4730
4731 /* As long as we're doing a round trip to the server anyway,
4732 * let's be prepared for a page of acl data. */
4733 if (npages == 0)
4734 npages = 1;
4735 if (npages > ARRAY_SIZE(pages))
4736 return -ERANGE;
4737
4738 for (i = 0; i < npages; i++) {
4739 pages[i] = alloc_page(GFP_KERNEL);
4740 if (!pages[i])
4741 goto out_free;
4742 }
4743
4744 /* for decoding across pages */
4745 res.acl_scratch = alloc_page(GFP_KERNEL);
4746 if (!res.acl_scratch)
4747 goto out_free;
4748
4749 args.acl_len = npages * PAGE_SIZE;
4750
4751 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4752 __func__, buf, buflen, npages, args.acl_len);
4753 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4754 &msg, &args.seq_args, &res.seq_res, 0);
4755 if (ret)
4756 goto out_free;
4757
4758 /* Handle the case where the passed-in buffer is too short */
4759 if (res.acl_flags & NFS4_ACL_TRUNC) {
4760 /* Did the user only issue a request for the acl length? */
4761 if (buf == NULL)
4762 goto out_ok;
4763 ret = -ERANGE;
4764 goto out_free;
4765 }
4766 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4767 if (buf) {
4768 if (res.acl_len > buflen) {
4769 ret = -ERANGE;
4770 goto out_free;
4771 }
4772 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4773 }
4774 out_ok:
4775 ret = res.acl_len;
4776 out_free:
4777 for (i = 0; i < npages; i++)
4778 if (pages[i])
4779 __free_page(pages[i]);
4780 if (res.acl_scratch)
4781 __free_page(res.acl_scratch);
4782 return ret;
4783 }
4784
4785 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4786 {
4787 struct nfs4_exception exception = { };
4788 ssize_t ret;
4789 do {
4790 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4791 trace_nfs4_get_acl(inode, ret);
4792 if (ret >= 0)
4793 break;
4794 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4795 } while (exception.retry);
4796 return ret;
4797 }
4798
4799 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4800 {
4801 struct nfs_server *server = NFS_SERVER(inode);
4802 int ret;
4803
4804 if (!nfs4_server_supports_acls(server))
4805 return -EOPNOTSUPP;
4806 ret = nfs_revalidate_inode(server, inode);
4807 if (ret < 0)
4808 return ret;
4809 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4810 nfs_zap_acl_cache(inode);
4811 ret = nfs4_read_cached_acl(inode, buf, buflen);
4812 if (ret != -ENOENT)
4813 /* -ENOENT is returned if there is no ACL or if there is an ACL
4814 * but no cached ACL data, only the cached ACL length */
4815 return ret;
4816 return nfs4_get_acl_uncached(inode, buf, buflen);
4817 }
4818
4819 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4820 {
4821 struct nfs_server *server = NFS_SERVER(inode);
4822 struct page *pages[NFS4ACL_MAXPAGES];
4823 struct nfs_setaclargs arg = {
4824 .fh = NFS_FH(inode),
4825 .acl_pages = pages,
4826 .acl_len = buflen,
4827 };
4828 struct nfs_setaclres res;
4829 struct rpc_message msg = {
4830 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4831 .rpc_argp = &arg,
4832 .rpc_resp = &res,
4833 };
4834 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4835 int ret, i;
4836
4837 if (!nfs4_server_supports_acls(server))
4838 return -EOPNOTSUPP;
4839 if (npages > ARRAY_SIZE(pages))
4840 return -ERANGE;
4841 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
4842 if (i < 0)
4843 return i;
4844 nfs4_inode_return_delegation(inode);
4845 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4846
4847 /*
4848 * Free each page after tx, so the only ref left is
4849 * held by the network stack
4850 */
4851 for (; i > 0; i--)
4852 put_page(pages[i-1]);
4853
4854 /*
4855 * An ACL update can result in an inode attribute update,
4856 * so mark the attribute cache invalid.
4857 */
4858 spin_lock(&inode->i_lock);
4859 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4860 spin_unlock(&inode->i_lock);
4861 nfs_access_zap_cache(inode);
4862 nfs_zap_acl_cache(inode);
4863 return ret;
4864 }
4865
4866 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4867 {
4868 struct nfs4_exception exception = { };
4869 int err;
4870 do {
4871 err = __nfs4_proc_set_acl(inode, buf, buflen);
4872 trace_nfs4_set_acl(inode, err);
4873 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4874 &exception);
4875 } while (exception.retry);
4876 return err;
4877 }
4878
4879 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4880 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4881 size_t buflen)
4882 {
4883 struct nfs_server *server = NFS_SERVER(inode);
4884 struct nfs_fattr fattr;
4885 struct nfs4_label label = {0, 0, buflen, buf};
4886
4887 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4888 struct nfs4_getattr_arg arg = {
4889 .fh = NFS_FH(inode),
4890 .bitmask = bitmask,
4891 };
4892 struct nfs4_getattr_res res = {
4893 .fattr = &fattr,
4894 .label = &label,
4895 .server = server,
4896 };
4897 struct rpc_message msg = {
4898 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4899 .rpc_argp = &arg,
4900 .rpc_resp = &res,
4901 };
4902 int ret;
4903
4904 nfs_fattr_init(&fattr);
4905
4906 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4907 if (ret)
4908 return ret;
4909 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4910 return -ENOENT;
4911 if (buflen < label.len)
4912 return -ERANGE;
4913 return 0;
4914 }
4915
4916 static int nfs4_get_security_label(struct inode *inode, void *buf,
4917 size_t buflen)
4918 {
4919 struct nfs4_exception exception = { };
4920 int err;
4921
4922 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4923 return -EOPNOTSUPP;
4924
4925 do {
4926 err = _nfs4_get_security_label(inode, buf, buflen);
4927 trace_nfs4_get_security_label(inode, err);
4928 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4929 &exception);
4930 } while (exception.retry);
4931 return err;
4932 }
4933
4934 static int _nfs4_do_set_security_label(struct inode *inode,
4935 struct nfs4_label *ilabel,
4936 struct nfs_fattr *fattr,
4937 struct nfs4_label *olabel)
4938 {
4939
4940 struct iattr sattr = {0};
4941 struct nfs_server *server = NFS_SERVER(inode);
4942 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4943 struct nfs_setattrargs arg = {
4944 .fh = NFS_FH(inode),
4945 .iap = &sattr,
4946 .server = server,
4947 .bitmask = bitmask,
4948 .label = ilabel,
4949 };
4950 struct nfs_setattrres res = {
4951 .fattr = fattr,
4952 .label = olabel,
4953 .server = server,
4954 };
4955 struct rpc_message msg = {
4956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4957 .rpc_argp = &arg,
4958 .rpc_resp = &res,
4959 };
4960 int status;
4961
4962 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4963
4964 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4965 if (status)
4966 dprintk("%s failed: %d\n", __func__, status);
4967
4968 return status;
4969 }
4970
4971 static int nfs4_do_set_security_label(struct inode *inode,
4972 struct nfs4_label *ilabel,
4973 struct nfs_fattr *fattr,
4974 struct nfs4_label *olabel)
4975 {
4976 struct nfs4_exception exception = { };
4977 int err;
4978
4979 do {
4980 err = _nfs4_do_set_security_label(inode, ilabel,
4981 fattr, olabel);
4982 trace_nfs4_set_security_label(inode, err);
4983 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4984 &exception);
4985 } while (exception.retry);
4986 return err;
4987 }
4988
4989 static int
4990 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4991 {
4992 struct nfs4_label ilabel, *olabel = NULL;
4993 struct nfs_fattr fattr;
4994 struct rpc_cred *cred;
4995 struct inode *inode = d_inode(dentry);
4996 int status;
4997
4998 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4999 return -EOPNOTSUPP;
5000
5001 nfs_fattr_init(&fattr);
5002
5003 ilabel.pi = 0;
5004 ilabel.lfs = 0;
5005 ilabel.label = (char *)buf;
5006 ilabel.len = buflen;
5007
5008 cred = rpc_lookup_cred();
5009 if (IS_ERR(cred))
5010 return PTR_ERR(cred);
5011
5012 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5013 if (IS_ERR(olabel)) {
5014 status = PTR_ERR(olabel);
5015 goto out;
5016 }
5017
5018 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5019 if (status == 0)
5020 nfs_setsecurity(inode, &fattr, olabel);
5021
5022 nfs4_label_free(olabel);
5023 out:
5024 put_rpccred(cred);
5025 return status;
5026 }
5027 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5028
5029
5030 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5031 nfs4_verifier *bootverf)
5032 {
5033 __be32 verf[2];
5034
5035 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5036 /* An impossible timestamp guarantees this value
5037 * will never match a generated boot time. */
5038 verf[0] = 0;
5039 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
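/* A real boot time always has tv_nsec < NSEC_PER_SEC, so a
 * verifier built from NSEC_PER_SEC + 1 can never collide with
 * one derived from a genuine boot timestamp below. */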
5040 } else {
5041 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5042 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5043 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5044 }
5045 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5046 }
5047
5048 static int
5049 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5050 {
5051 int result;
5052 size_t len;
5053 char *str;
5054
5055 if (clp->cl_owner_id != NULL)
5056 return 0;
5057
5058 rcu_read_lock();
5059 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5060 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5061 1 +
5062 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5063 1;
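/* The 14 is strlen("Linux NFSv4.0 ") from the format string used
 * below; the three "+ 1"s cover the '/', the ' ' and the
 * terminating NUL. */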
5064 rcu_read_unlock();
5065
5066 if (len > NFS4_OPAQUE_LIMIT + 1)
5067 return -EINVAL;
5068
5069 /*
5070 * Since this string is allocated at mount time, and held until the
5071 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5072 * about a memory-reclaim deadlock.
5073 */
5074 str = kmalloc(len, GFP_KERNEL);
5075 if (!str)
5076 return -ENOMEM;
5077
5078 rcu_read_lock();
5079 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5080 clp->cl_ipaddr,
5081 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5082 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5083 rcu_read_unlock();
5084
5085 clp->cl_owner_id = str;
5086 return 0;
5087 }
5088
5089 static int
5090 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5091 {
5092 int result;
5093 size_t len;
5094 char *str;
5095
5096 len = 10 + 10 + 1 + 10 + 1 +
5097 strlen(nfs4_client_id_uniquifier) + 1 +
5098 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5099
5100 if (len > NFS4_OPAQUE_LIMIT + 1)
5101 return -EINVAL;
5102
5103 /*
5104 * Since this string is allocated at mount time, and held until the
5105 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5106 * about a memory-reclaim deadlock.
5107 */
5108 str = kmalloc(len, GFP_KERNEL);
5109 if (!str)
5110 return -ENOMEM;
5111
5112 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5113 clp->rpc_ops->version, clp->cl_minorversion,
5114 nfs4_client_id_uniquifier,
5115 clp->cl_rpcclient->cl_nodename);
5116 clp->cl_owner_id = str;
5117 return 0;
5118 }
5119
5120 static int
5121 nfs4_init_uniform_client_string(struct nfs_client *clp)
5122 {
5123 int result;
5124 size_t len;
5125 char *str;
5126
5127 if (clp->cl_owner_id != NULL)
5128 return 0;
5129
5130 if (nfs4_client_id_uniquifier[0] != '\0')
5131 return nfs4_init_uniquifier_client_string(clp);
5132
5133 len = 10 + 10 + 1 + 10 + 1 +
5134 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5135
5136 if (len > NFS4_OPAQUE_LIMIT + 1)
5137 return -EINVAL;
5138
5139 /*
5140 * Since this string is allocated at mount time, and held until the
5141 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5142 * about a memory-reclaim deadlock.
5143 */
5144 str = kmalloc(len, GFP_KERNEL);
5145 if (!str)
5146 return -ENOMEM;
5147
5148 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5149 clp->rpc_ops->version, clp->cl_minorversion,
5150 clp->cl_rpcclient->cl_nodename);
5151 clp->cl_owner_id = str;
5152 return 0;
5153 }
5154
5155 /*
5156 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5157 * services. Advertise one based on the address family of the
5158 * clientaddr.
5159 */
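/* e.g. clientaddr=192.0.2.1 advertises "tcp", while
 * clientaddr=2001:db8::1 (it contains a ':') advertises "tcp6". */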
5160 static unsigned int
5161 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5162 {
5163 if (strchr(clp->cl_ipaddr, ':') != NULL)
5164 return scnprintf(buf, len, "tcp6");
5165 else
5166 return scnprintf(buf, len, "tcp");
5167 }
5168
5169 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5170 {
5171 struct nfs4_setclientid *sc = calldata;
5172
5173 if (task->tk_status == 0)
5174 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5175 }
5176
5177 static const struct rpc_call_ops nfs4_setclientid_ops = {
5178 .rpc_call_done = nfs4_setclientid_done,
5179 };
5180
5181 /**
5182 * nfs4_proc_setclientid - Negotiate client ID
5183 * @clp: state data structure
5184 * @program: RPC program for NFSv4 callback service
5185 * @port: IP port number for NFS4 callback service
5186 * @cred: RPC credential to use for this call
5187 * @res: where to place the result
5188 *
5189 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5190 */
5191 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5192 unsigned short port, struct rpc_cred *cred,
5193 struct nfs4_setclientid_res *res)
5194 {
5195 nfs4_verifier sc_verifier;
5196 struct nfs4_setclientid setclientid = {
5197 .sc_verifier = &sc_verifier,
5198 .sc_prog = program,
5199 .sc_clnt = clp,
5200 };
5201 struct rpc_message msg = {
5202 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5203 .rpc_argp = &setclientid,
5204 .rpc_resp = res,
5205 .rpc_cred = cred,
5206 };
5207 struct rpc_task *task;
5208 struct rpc_task_setup task_setup_data = {
5209 .rpc_client = clp->cl_rpcclient,
5210 .rpc_message = &msg,
5211 .callback_ops = &nfs4_setclientid_ops,
5212 .callback_data = &setclientid,
5213 .flags = RPC_TASK_TIMEOUT,
5214 };
5215 int status;
5216
5217 /* nfs_client_id4 */
5218 nfs4_init_boot_verifier(clp, &sc_verifier);
5219
5220 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5221 status = nfs4_init_uniform_client_string(clp);
5222 else
5223 status = nfs4_init_nonuniform_client_string(clp);
5224
5225 if (status)
5226 goto out;
5227
5228 /* cb_client4 */
5229 setclientid.sc_netid_len =
5230 nfs4_init_callback_netid(clp,
5231 setclientid.sc_netid,
5232 sizeof(setclientid.sc_netid));
5233 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5234 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5235 clp->cl_ipaddr, port >> 8, port & 255);
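/* The callback address is "<ip>.<port-hi>.<port-lo>"; e.g. (illustrative
 * values) cl_ipaddr "192.0.2.1" and port 51234 yield "192.0.2.1.200.34",
 * since 51234 == 200 * 256 + 34. */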
5236
5237 dprintk("NFS call setclientid auth=%s, '%s'\n",
5238 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5239 clp->cl_owner_id);
5240 task = rpc_run_task(&task_setup_data);
5241 if (IS_ERR(task)) {
5242 status = PTR_ERR(task);
5243 goto out;
5244 }
5245 status = task->tk_status;
5246 if (setclientid.sc_cred) {
5247 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5248 put_rpccred(setclientid.sc_cred);
5249 }
5250 rpc_put_task(task);
5251 out:
5252 trace_nfs4_setclientid(clp, status);
5253 dprintk("NFS reply setclientid: %d\n", status);
5254 return status;
5255 }
5256
5257 /**
5258 * nfs4_proc_setclientid_confirm - Confirm client ID
5259 * @clp: state data structure
5260 * @res: result of a previous SETCLIENTID
5261 * @cred: RPC credential to use for this call
5262 *
5263 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5264 */
5265 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5266 struct nfs4_setclientid_res *arg,
5267 struct rpc_cred *cred)
5268 {
5269 struct rpc_message msg = {
5270 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5271 .rpc_argp = arg,
5272 .rpc_cred = cred,
5273 };
5274 int status;
5275
5276 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5277 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5278 clp->cl_clientid);
5279 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5280 trace_nfs4_setclientid_confirm(clp, status);
5281 dprintk("NFS reply setclientid_confirm: %d\n", status);
5282 return status;
5283 }
5284
5285 struct nfs4_delegreturndata {
5286 struct nfs4_delegreturnargs args;
5287 struct nfs4_delegreturnres res;
5288 struct nfs_fh fh;
5289 nfs4_stateid stateid;
5290 unsigned long timestamp;
5291 struct nfs_fattr fattr;
5292 int rpc_status;
5293 struct inode *inode;
5294 bool roc;
5295 u32 roc_barrier;
5296 };
5297
5298 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5299 {
5300 struct nfs4_delegreturndata *data = calldata;
5301
5302 if (!nfs4_sequence_done(task, &data->res.seq_res))
5303 return;
5304
5305 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5306 switch (task->tk_status) {
5307 case 0:
5308 renew_lease(data->res.server, data->timestamp);
5309 case -NFS4ERR_ADMIN_REVOKED:
5310 case -NFS4ERR_DELEG_REVOKED:
5311 case -NFS4ERR_BAD_STATEID:
5312 case -NFS4ERR_OLD_STATEID:
5313 case -NFS4ERR_STALE_STATEID:
5314 case -NFS4ERR_EXPIRED:
5315 task->tk_status = 0;
5316 if (data->roc)
5317 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5318 break;
5319 default:
5320 if (nfs4_async_handle_error(task, data->res.server,
5321 NULL, NULL) == -EAGAIN) {
5322 rpc_restart_call_prepare(task);
5323 return;
5324 }
5325 }
5326 data->rpc_status = task->tk_status;
5327 }
5328
5329 static void nfs4_delegreturn_release(void *calldata)
5330 {
5331 struct nfs4_delegreturndata *data = calldata;
5332 struct inode *inode = data->inode;
5333
5334 if (inode) {
5335 if (data->roc)
5336 pnfs_roc_release(inode);
5337 nfs_iput_and_deactive(inode);
5338 }
5339 kfree(calldata);
5340 }
5341
5342 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5343 {
5344 struct nfs4_delegreturndata *d_data;
5345
5346 d_data = (struct nfs4_delegreturndata *)data;
5347
5348 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5349 return;
5350
5351 if (d_data->roc)
5352 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5353
5354 nfs4_setup_sequence(d_data->res.server,
5355 &d_data->args.seq_args,
5356 &d_data->res.seq_res,
5357 task);
5358 }
5359
5360 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5361 .rpc_call_prepare = nfs4_delegreturn_prepare,
5362 .rpc_call_done = nfs4_delegreturn_done,
5363 .rpc_release = nfs4_delegreturn_release,
5364 };
5365
5366 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5367 {
5368 struct nfs4_delegreturndata *data;
5369 struct nfs_server *server = NFS_SERVER(inode);
5370 struct rpc_task *task;
5371 struct rpc_message msg = {
5372 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5373 .rpc_cred = cred,
5374 };
5375 struct rpc_task_setup task_setup_data = {
5376 .rpc_client = server->client,
5377 .rpc_message = &msg,
5378 .callback_ops = &nfs4_delegreturn_ops,
5379 .flags = RPC_TASK_ASYNC,
5380 };
5381 int status = 0;
5382
5383 data = kzalloc(sizeof(*data), GFP_NOFS);
5384 if (data == NULL)
5385 return -ENOMEM;
5386 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5387 data->args.fhandle = &data->fh;
5388 data->args.stateid = &data->stateid;
5389 data->args.bitmask = server->cache_consistency_bitmask;
5390 nfs_copy_fh(&data->fh, NFS_FH(inode));
5391 nfs4_stateid_copy(&data->stateid, stateid);
5392 data->res.fattr = &data->fattr;
5393 data->res.server = server;
5394 nfs_fattr_init(data->res.fattr);
5395 data->timestamp = jiffies;
5396 data->rpc_status = 0;
5397 data->inode = nfs_igrab_and_active(inode);
5398 if (data->inode)
5399 data->roc = nfs4_roc(inode);
5400
5401 task_setup_data.callback_data = data;
5402 msg.rpc_argp = &data->args;
5403 msg.rpc_resp = &data->res;
5404 task = rpc_run_task(&task_setup_data);
5405 if (IS_ERR(task))
5406 return PTR_ERR(task);
5407 if (!issync)
5408 goto out;
5409 status = nfs4_wait_for_completion_rpc_task(task);
5410 if (status != 0)
5411 goto out;
5412 status = data->rpc_status;
5413 if (status == 0)
5414 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5415 else
5416 nfs_refresh_inode(inode, &data->fattr);
5417 out:
5418 rpc_put_task(task);
5419 return status;
5420 }
5421
5422 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5423 {
5424 struct nfs_server *server = NFS_SERVER(inode);
5425 struct nfs4_exception exception = { };
5426 int err;
5427 do {
5428 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5429 trace_nfs4_delegreturn(inode, err);
5430 switch (err) {
5431 case -NFS4ERR_STALE_STATEID:
5432 case -NFS4ERR_EXPIRED:
5433 case 0:
5434 return 0;
5435 }
5436 err = nfs4_handle_exception(server, err, &exception);
5437 } while (exception.retry);
5438 return err;
5439 }
5440
5441 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5442 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5443
5444 /*
5445 * sleep, with exponential backoff, and retry the LOCK operation.
5446 */
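/* With NFS4_LOCK_MINTIMEOUT of 1s and NFS4_LOCK_MAXTIMEOUT of 30s the
 * successive waits are 1s, 2s, 4s, 8s, 16s and then 30s thereafter. */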
5447 static unsigned long
5448 nfs4_set_lock_task_retry(unsigned long timeout)
5449 {
5450 freezable_schedule_timeout_killable_unsafe(timeout);
5451 timeout <<= 1;
5452 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5453 return NFS4_LOCK_MAXTIMEOUT;
5454 return timeout;
5455 }
5456
5457 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5458 {
5459 struct inode *inode = state->inode;
5460 struct nfs_server *server = NFS_SERVER(inode);
5461 struct nfs_client *clp = server->nfs_client;
5462 struct nfs_lockt_args arg = {
5463 .fh = NFS_FH(inode),
5464 .fl = request,
5465 };
5466 struct nfs_lockt_res res = {
5467 .denied = request,
5468 };
5469 struct rpc_message msg = {
5470 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5471 .rpc_argp = &arg,
5472 .rpc_resp = &res,
5473 .rpc_cred = state->owner->so_cred,
5474 };
5475 struct nfs4_lock_state *lsp;
5476 int status;
5477
5478 arg.lock_owner.clientid = clp->cl_clientid;
5479 status = nfs4_set_lock_state(state, request);
5480 if (status != 0)
5481 goto out;
5482 lsp = request->fl_u.nfs4_fl.owner;
5483 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5484 arg.lock_owner.s_dev = server->s_dev;
5485 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5486 switch (status) {
5487 case 0:
5488 request->fl_type = F_UNLCK;
5489 break;
5490 case -NFS4ERR_DENIED:
5491 status = 0;
5492 }
5493 request->fl_ops->fl_release_private(request);
5494 request->fl_ops = NULL;
5495 out:
5496 return status;
5497 }
5498
5499 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5500 {
5501 struct nfs4_exception exception = { };
5502 int err;
5503
5504 do {
5505 err = _nfs4_proc_getlk(state, cmd, request);
5506 trace_nfs4_get_lock(request, state, cmd, err);
5507 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5508 &exception);
5509 } while (exception.retry);
5510 return err;
5511 }
5512
5513 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5514 {
5515 return locks_lock_inode_wait(inode, fl);
5516 }
5517
5518 struct nfs4_unlockdata {
5519 struct nfs_locku_args arg;
5520 struct nfs_locku_res res;
5521 struct nfs4_lock_state *lsp;
5522 struct nfs_open_context *ctx;
5523 struct file_lock fl;
5524 struct nfs_server *server;
5525 unsigned long timestamp;
5526 };
5527
5528 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5529 struct nfs_open_context *ctx,
5530 struct nfs4_lock_state *lsp,
5531 struct nfs_seqid *seqid)
5532 {
5533 struct nfs4_unlockdata *p;
5534 struct inode *inode = lsp->ls_state->inode;
5535
5536 p = kzalloc(sizeof(*p), GFP_NOFS);
5537 if (p == NULL)
5538 return NULL;
5539 p->arg.fh = NFS_FH(inode);
5540 p->arg.fl = &p->fl;
5541 p->arg.seqid = seqid;
5542 p->res.seqid = seqid;
5543 p->lsp = lsp;
5544 atomic_inc(&lsp->ls_count);
5545 /* Ensure we don't close the file until we're done freeing locks! */
5546 p->ctx = get_nfs_open_context(ctx);
5547 memcpy(&p->fl, fl, sizeof(p->fl));
5548 p->server = NFS_SERVER(inode);
5549 return p;
5550 }
5551
5552 static void nfs4_locku_release_calldata(void *data)
5553 {
5554 struct nfs4_unlockdata *calldata = data;
5555 nfs_free_seqid(calldata->arg.seqid);
5556 nfs4_put_lock_state(calldata->lsp);
5557 put_nfs_open_context(calldata->ctx);
5558 kfree(calldata);
5559 }
5560
5561 static void nfs4_locku_done(struct rpc_task *task, void *data)
5562 {
5563 struct nfs4_unlockdata *calldata = data;
5564
5565 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5566 return;
5567 switch (task->tk_status) {
5568 case 0:
5569 renew_lease(calldata->server, calldata->timestamp);
5570 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5571 if (nfs4_update_lock_stateid(calldata->lsp,
5572 &calldata->res.stateid))
5573 break;
5574 case -NFS4ERR_BAD_STATEID:
5575 case -NFS4ERR_OLD_STATEID:
5576 case -NFS4ERR_STALE_STATEID:
5577 case -NFS4ERR_EXPIRED:
5578 if (!nfs4_stateid_match(&calldata->arg.stateid,
5579 &calldata->lsp->ls_stateid))
5580 rpc_restart_call_prepare(task);
5581 break;
5582 default:
5583 if (nfs4_async_handle_error(task, calldata->server,
5584 NULL, NULL) == -EAGAIN)
5585 rpc_restart_call_prepare(task);
5586 }
5587 nfs_release_seqid(calldata->arg.seqid);
5588 }
5589
5590 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5591 {
5592 struct nfs4_unlockdata *calldata = data;
5593
5594 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5595 goto out_wait;
5596 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5597 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5598 /* Note: exit _without_ running nfs4_locku_done */
5599 goto out_no_action;
5600 }
5601 calldata->timestamp = jiffies;
5602 if (nfs4_setup_sequence(calldata->server,
5603 &calldata->arg.seq_args,
5604 &calldata->res.seq_res,
5605 task) != 0)
5606 nfs_release_seqid(calldata->arg.seqid);
5607 return;
5608 out_no_action:
5609 task->tk_action = NULL;
5610 out_wait:
5611 nfs4_sequence_done(task, &calldata->res.seq_res);
5612 }
5613
5614 static const struct rpc_call_ops nfs4_locku_ops = {
5615 .rpc_call_prepare = nfs4_locku_prepare,
5616 .rpc_call_done = nfs4_locku_done,
5617 .rpc_release = nfs4_locku_release_calldata,
5618 };
5619
5620 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5621 struct nfs_open_context *ctx,
5622 struct nfs4_lock_state *lsp,
5623 struct nfs_seqid *seqid)
5624 {
5625 struct nfs4_unlockdata *data;
5626 struct rpc_message msg = {
5627 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5628 .rpc_cred = ctx->cred,
5629 };
5630 struct rpc_task_setup task_setup_data = {
5631 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5632 .rpc_message = &msg,
5633 .callback_ops = &nfs4_locku_ops,
5634 .workqueue = nfsiod_workqueue,
5635 .flags = RPC_TASK_ASYNC,
5636 };
5637
5638 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5639 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5640
5641 /* Ensure this is an unlock - when canceling a lock, the
5642 * canceled lock is passed in, and it won't be an unlock.
5643 */
5644 fl->fl_type = F_UNLCK;
5645
5646 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5647 if (data == NULL) {
5648 nfs_free_seqid(seqid);
5649 return ERR_PTR(-ENOMEM);
5650 }
5651
5652 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5653 msg.rpc_argp = &data->arg;
5654 msg.rpc_resp = &data->res;
5655 task_setup_data.callback_data = data;
5656 return rpc_run_task(&task_setup_data);
5657 }
5658
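/*
 * Release a byte-range lock: drop the local VFS lock first, then send
 * LOCKU to the server unless the lock was only ever held locally under
 * a delegation (NFS_LOCK_INITIALIZED not set).
 */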
5659 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5660 {
5661 struct inode *inode = state->inode;
5662 struct nfs4_state_owner *sp = state->owner;
5663 struct nfs_inode *nfsi = NFS_I(inode);
5664 struct nfs_seqid *seqid;
5665 struct nfs4_lock_state *lsp;
5666 struct rpc_task *task;
5667 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5668 int status = 0;
5669 unsigned char fl_flags = request->fl_flags;
5670
5671 status = nfs4_set_lock_state(state, request);
5672 /* Unlock _before_ we do the RPC call */
5673 request->fl_flags |= FL_EXISTS;
5674 /* Exclude nfs_delegation_claim_locks() */
5675 mutex_lock(&sp->so_delegreturn_mutex);
5676 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5677 down_read(&nfsi->rwsem);
5678 if (do_vfs_lock(inode, request) == -ENOENT) {
5679 up_read(&nfsi->rwsem);
5680 mutex_unlock(&sp->so_delegreturn_mutex);
5681 goto out;
5682 }
5683 up_read(&nfsi->rwsem);
5684 mutex_unlock(&sp->so_delegreturn_mutex);
5685 if (status != 0)
5686 goto out;
5687 /* Is this a delegated lock? */
5688 lsp = request->fl_u.nfs4_fl.owner;
5689 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5690 goto out;
5691 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5692 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5693 status = -ENOMEM;
5694 if (IS_ERR(seqid))
5695 goto out;
5696 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5697 status = PTR_ERR(task);
5698 if (IS_ERR(task))
5699 goto out;
5700 status = nfs4_wait_for_completion_rpc_task(task);
5701 rpc_put_task(task);
5702 out:
5703 request->fl_flags = fl_flags;
5704 trace_nfs4_unlock(request, state, F_SETLK, status);
5705 return status;
5706 }
5707
5708 struct nfs4_lockdata {
5709 struct nfs_lock_args arg;
5710 struct nfs_lock_res res;
5711 struct nfs4_lock_state *lsp;
5712 struct nfs_open_context *ctx;
5713 struct file_lock fl;
5714 unsigned long timestamp;
5715 int rpc_status;
5716 int cancelled;
5717 struct nfs_server *server;
5718 };
5719
5720 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5721 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5722 gfp_t gfp_mask)
5723 {
5724 struct nfs4_lockdata *p;
5725 struct inode *inode = lsp->ls_state->inode;
5726 struct nfs_server *server = NFS_SERVER(inode);
5727 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5728
5729 p = kzalloc(sizeof(*p), gfp_mask);
5730 if (p == NULL)
5731 return NULL;
5732
5733 p->arg.fh = NFS_FH(inode);
5734 p->arg.fl = &p->fl;
5735 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5736 if (IS_ERR(p->arg.open_seqid))
5737 goto out_free;
5738 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5739 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5740 if (IS_ERR(p->arg.lock_seqid))
5741 goto out_free_seqid;
5742 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5743 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5744 p->arg.lock_owner.s_dev = server->s_dev;
5745 p->res.lock_seqid = p->arg.lock_seqid;
5746 p->lsp = lsp;
5747 p->server = server;
5748 atomic_inc(&lsp->ls_count);
5749 p->ctx = get_nfs_open_context(ctx);
5750 get_file(fl->fl_file);
5751 memcpy(&p->fl, fl, sizeof(p->fl));
5752 return p;
5753 out_free_seqid:
5754 nfs_free_seqid(p->arg.open_seqid);
5755 out_free:
5756 kfree(p);
5757 return NULL;
5758 }
5759
5760 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5761 {
5762 struct nfs4_lockdata *data = calldata;
5763 struct nfs4_state *state = data->lsp->ls_state;
5764
5765 dprintk("%s: begin!\n", __func__);
5766 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5767 goto out_wait;
5768 /* Do we need to do an open_to_lock_owner? */
5769 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5770 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5771 goto out_release_lock_seqid;
5772 }
5773 nfs4_stateid_copy(&data->arg.open_stateid,
5774 &state->open_stateid);
5775 data->arg.new_lock_owner = 1;
5776 data->res.open_seqid = data->arg.open_seqid;
5777 } else {
5778 data->arg.new_lock_owner = 0;
5779 nfs4_stateid_copy(&data->arg.lock_stateid,
5780 &data->lsp->ls_stateid);
5781 }
5782 if (!nfs4_valid_open_stateid(state)) {
5783 data->rpc_status = -EBADF;
5784 task->tk_action = NULL;
5785 goto out_release_open_seqid;
5786 }
5787 data->timestamp = jiffies;
5788 if (nfs4_setup_sequence(data->server,
5789 &data->arg.seq_args,
5790 &data->res.seq_res,
5791 task) == 0)
5792 return;
5793 out_release_open_seqid:
5794 nfs_release_seqid(data->arg.open_seqid);
5795 out_release_lock_seqid:
5796 nfs_release_seqid(data->arg.lock_seqid);
5797 out_wait:
5798 nfs4_sequence_done(task, &data->res.seq_res);
5799 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5800 }
5801
5802 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5803 {
5804 struct nfs4_lockdata *data = calldata;
5805 struct nfs4_lock_state *lsp = data->lsp;
5806
5807 dprintk("%s: begin!\n", __func__);
5808
5809 if (!nfs4_sequence_done(task, &data->res.seq_res))
5810 return;
5811
5812 data->rpc_status = task->tk_status;
5813 switch (task->tk_status) {
5814 case 0:
5815 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5816 data->timestamp);
5817 if (data->arg.new_lock) {
5818 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5819 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5820 rpc_restart_call_prepare(task);
5821 break;
5822 }
5823 }
5824 if (data->arg.new_lock_owner != 0) {
5825 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5826 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5827 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5828 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5829 rpc_restart_call_prepare(task);
5830 break;
5831 case -NFS4ERR_BAD_STATEID:
5832 case -NFS4ERR_OLD_STATEID:
5833 case -NFS4ERR_STALE_STATEID:
5834 case -NFS4ERR_EXPIRED:
5835 if (data->arg.new_lock_owner != 0) {
5836 if (!nfs4_stateid_match(&data->arg.open_stateid,
5837 &lsp->ls_state->open_stateid))
5838 rpc_restart_call_prepare(task);
5839 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5840 &lsp->ls_stateid))
5841 rpc_restart_call_prepare(task);
5842 }
5843 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5844 }
5845
5846 static void nfs4_lock_release(void *calldata)
5847 {
5848 struct nfs4_lockdata *data = calldata;
5849
5850 dprintk("%s: begin!\n", __func__);
5851 nfs_free_seqid(data->arg.open_seqid);
5852 if (data->cancelled != 0) {
5853 struct rpc_task *task;
5854 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5855 data->arg.lock_seqid);
5856 if (!IS_ERR(task))
5857 rpc_put_task_async(task);
5858 dprintk("%s: cancelling lock!\n", __func__);
5859 } else
5860 nfs_free_seqid(data->arg.lock_seqid);
5861 nfs4_put_lock_state(data->lsp);
5862 put_nfs_open_context(data->ctx);
5863 fput(data->fl.fl_file);
5864 kfree(data);
5865 dprintk("%s: done!\n", __func__);
5866 }
5867
5868 static const struct rpc_call_ops nfs4_lock_ops = {
5869 .rpc_call_prepare = nfs4_lock_prepare,
5870 .rpc_call_done = nfs4_lock_done,
5871 .rpc_release = nfs4_lock_release,
5872 };
5873
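/*
 * Map a LOCK error onto the appropriate recovery action: stateid
 * recovery for the affected lock state, or lease recovery for the
 * whole client.
 */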
5874 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5875 {
5876 switch (error) {
5877 case -NFS4ERR_ADMIN_REVOKED:
5878 case -NFS4ERR_BAD_STATEID:
5879 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5880 if (new_lock_owner != 0 ||
5881 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5882 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5883 break;
5884 case -NFS4ERR_STALE_STATEID:
5885 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
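/* Fall through: a stale stateid also triggers lease recovery */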
5886 case -NFS4ERR_EXPIRED:
5887 nfs4_schedule_lease_recovery(server->nfs_client);
5888 }
5889 }
5890
5891 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5892 {
5893 struct nfs4_lockdata *data;
5894 struct rpc_task *task;
5895 struct rpc_message msg = {
5896 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5897 .rpc_cred = state->owner->so_cred,
5898 };
5899 struct rpc_task_setup task_setup_data = {
5900 .rpc_client = NFS_CLIENT(state->inode),
5901 .rpc_message = &msg,
5902 .callback_ops = &nfs4_lock_ops,
5903 .workqueue = nfsiod_workqueue,
5904 .flags = RPC_TASK_ASYNC,
5905 };
5906 int ret;
5907
5908 dprintk("%s: begin!\n", __func__);
5909 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5910 fl->fl_u.nfs4_fl.owner,
5911 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5912 if (data == NULL)
5913 return -ENOMEM;
5914 if (IS_SETLKW(cmd))
5915 data->arg.block = 1;
5916 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5917 msg.rpc_argp = &data->arg;
5918 msg.rpc_resp = &data->res;
5919 task_setup_data.callback_data = data;
5920 if (recovery_type > NFS_LOCK_NEW) {
5921 if (recovery_type == NFS_LOCK_RECLAIM)
5922 data->arg.reclaim = NFS_LOCK_RECLAIM;
5923 nfs4_set_sequence_privileged(&data->arg.seq_args);
5924 } else
5925 data->arg.new_lock = 1;
5926 task = rpc_run_task(&task_setup_data);
5927 if (IS_ERR(task))
5928 return PTR_ERR(task);
5929 ret = nfs4_wait_for_completion_rpc_task(task);
5930 if (ret == 0) {
5931 ret = data->rpc_status;
5932 if (ret)
5933 nfs4_handle_setlk_error(data->server, data->lsp,
5934 data->arg.new_lock_owner, ret);
5935 } else
5936 data->cancelled = 1;
5937 rpc_put_task(task);
5938 dprintk("%s: done, ret = %d!\n", __func__, ret);
5939 return ret;
5940 }
5941
5942 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5943 {
5944 struct nfs_server *server = NFS_SERVER(state->inode);
5945 struct nfs4_exception exception = {
5946 .inode = state->inode,
5947 };
5948 int err;
5949
5950 do {
5951 /* Cache the lock if possible... */
5952 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5953 return 0;
5954 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5955 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5956 if (err != -NFS4ERR_DELAY)
5957 break;
5958 nfs4_handle_exception(server, err, &exception);
5959 } while (exception.retry);
5960 return err;
5961 }
5962
5963 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5964 {
5965 struct nfs_server *server = NFS_SERVER(state->inode);
5966 struct nfs4_exception exception = {
5967 .inode = state->inode,
5968 };
5969 int err;
5970
5971 err = nfs4_set_lock_state(state, request);
5972 if (err != 0)
5973 return err;
5974 if (!recover_lost_locks) {
5975 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5976 return 0;
5977 }
5978 do {
5979 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5980 return 0;
5981 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5982 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5983 switch (err) {
5984 default:
5985 goto out;
5986 case -NFS4ERR_GRACE:
5987 case -NFS4ERR_DELAY:
5988 nfs4_handle_exception(server, err, &exception);
5989 err = 0;
5990 }
5991 } while (exception.retry);
5992 out:
5993 return err;
5994 }
5995
5996 #if defined(CONFIG_NFS_V4_1)
5997 /**
5998 * nfs41_check_expired_locks - possibly free a lock stateid
5999 *
6000 * @state: NFSv4 state for an inode
6001 *
6002 * Returns NFS_OK if recovery for this stateid is now finished.
6003 * Otherwise a negative NFS4ERR value is returned.
6004 */
6005 static int nfs41_check_expired_locks(struct nfs4_state *state)
6006 {
6007 int status, ret = -NFS4ERR_BAD_STATEID;
6008 struct nfs4_lock_state *lsp;
6009 struct nfs_server *server = NFS_SERVER(state->inode);
6010
6011 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
6012 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
6013 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
6014
6015 status = nfs41_test_stateid(server,
6016 &lsp->ls_stateid,
6017 cred);
6018 trace_nfs4_test_lock_stateid(state, lsp, status);
6019 if (status != NFS_OK) {
6020 /* Free the stateid unless the server
6021 * informs us the stateid is unrecognized. */
6022 if (status != -NFS4ERR_BAD_STATEID)
6023 nfs41_free_stateid(server,
6024 &lsp->ls_stateid,
6025 cred);
6026 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6027 ret = status;
6028 }
6029 }
6030 }
6031
6032 return ret;
6033 }
6034
6035 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6036 {
6037 int status = NFS_OK;
6038
6039 if (test_bit(LK_STATE_IN_USE, &state->flags))
6040 status = nfs41_check_expired_locks(state);
6041 if (status != NFS_OK)
6042 status = nfs4_lock_expired(state, request);
6043 return status;
6044 }
6045 #endif
6046
6047 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6048 {
6049 struct nfs_inode *nfsi = NFS_I(state->inode);
6050 unsigned char fl_flags = request->fl_flags;
6051 int status = -ENOLCK;
6052
6053 if ((fl_flags & FL_POSIX) &&
6054 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6055 goto out;
6056 /* Is this a delegated open? */
6057 status = nfs4_set_lock_state(state, request);
6058 if (status != 0)
6059 goto out;
6060 request->fl_flags |= FL_ACCESS;
6061 status = do_vfs_lock(state->inode, request);
6062 if (status < 0)
6063 goto out;
6064 down_read(&nfsi->rwsem);
6065 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6066 /* Yes: cache locks! */
6067 /* ...but avoid races with delegation recall... */
6068 request->fl_flags = fl_flags & ~FL_SLEEP;
6069 status = do_vfs_lock(state->inode, request);
6070 up_read(&nfsi->rwsem);
6071 goto out;
6072 }
6073 up_read(&nfsi->rwsem);
6074 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6075 out:
6076 request->fl_flags = fl_flags;
6077 return status;
6078 }
6079
6080 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6081 {
6082 struct nfs4_exception exception = {
6083 .state = state,
6084 .inode = state->inode,
6085 };
6086 int err;
6087
6088 do {
6089 err = _nfs4_proc_setlk(state, cmd, request);
6090 trace_nfs4_set_lock(request, state, cmd, err);
6091 if (err == -NFS4ERR_DENIED)
6092 err = -EAGAIN;
6093 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6094 err, &exception);
6095 } while (exception.retry);
6096 return err;
6097 }
6098
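/*
 * Handle a lock request: dispatch F_GETLK, unlock and set-lock
 * requests to the helpers above.  A blocking F_SETLKW request that is
 * denied is retried after a delay computed by
 * nfs4_set_lock_task_retry() until it is granted, fails with another
 * error, or the process is signalled.
 */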
6099 static int
6100 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6101 {
6102 struct nfs_open_context *ctx;
6103 struct nfs4_state *state;
6104 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6105 int status;
6106
6107 /* verify open state */
6108 ctx = nfs_file_open_context(filp);
6109 state = ctx->state;
6110
6111 if (request->fl_start < 0 || request->fl_end < 0)
6112 return -EINVAL;
6113
6114 if (IS_GETLK(cmd)) {
6115 if (state != NULL)
6116 return nfs4_proc_getlk(state, F_GETLK, request);
6117 return 0;
6118 }
6119
6120 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6121 return -EINVAL;
6122
6123 if (request->fl_type == F_UNLCK) {
6124 if (state != NULL)
6125 return nfs4_proc_unlck(state, cmd, request);
6126 return 0;
6127 }
6128
6129 if (state == NULL)
6130 return -ENOLCK;
6131 /*
6132 * Don't rely on the VFS having checked the file open mode,
6133 * since it won't do this for flock() locks.
6134 */
6135 switch (request->fl_type) {
6136 case F_RDLCK:
6137 if (!(filp->f_mode & FMODE_READ))
6138 return -EBADF;
6139 break;
6140 case F_WRLCK:
6141 if (!(filp->f_mode & FMODE_WRITE))
6142 return -EBADF;
6143 }
6144
6145 do {
6146 status = nfs4_proc_setlk(state, cmd, request);
6147 if ((status != -EAGAIN) || IS_SETLK(cmd))
6148 break;
6149 timeout = nfs4_set_lock_task_retry(timeout);
6150 status = -ERESTARTSYS;
6151 if (signalled())
6152 break;
6153 } while(status < 0);
6154 return status;
6155 }
6156
6157 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6158 {
6159 struct nfs_server *server = NFS_SERVER(state->inode);
6160 int err;
6161
6162 err = nfs4_set_lock_state(state, fl);
6163 if (err != 0)
6164 return err;
6165 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6166 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6167 }
6168
6169 struct nfs_release_lockowner_data {
6170 struct nfs4_lock_state *lsp;
6171 struct nfs_server *server;
6172 struct nfs_release_lockowner_args args;
6173 struct nfs_release_lockowner_res res;
6174 unsigned long timestamp;
6175 };
6176
6177 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6178 {
6179 struct nfs_release_lockowner_data *data = calldata;
6180 struct nfs_server *server = data->server;
6181 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6182 &data->args.seq_args, &data->res.seq_res, task);
6183 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6184 data->timestamp = jiffies;
6185 }
6186
6187 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6188 {
6189 struct nfs_release_lockowner_data *data = calldata;
6190 struct nfs_server *server = data->server;
6191
6192 nfs40_sequence_done(task, &data->res.seq_res);
6193
6194 switch (task->tk_status) {
6195 case 0:
6196 renew_lease(server, data->timestamp);
6197 break;
6198 case -NFS4ERR_STALE_CLIENTID:
6199 case -NFS4ERR_EXPIRED:
6200 nfs4_schedule_lease_recovery(server->nfs_client);
6201 break;
6202 case -NFS4ERR_LEASE_MOVED:
6203 case -NFS4ERR_DELAY:
6204 if (nfs4_async_handle_error(task, server,
6205 NULL, NULL) == -EAGAIN)
6206 rpc_restart_call_prepare(task);
6207 }
6208 }
6209
6210 static void nfs4_release_lockowner_release(void *calldata)
6211 {
6212 struct nfs_release_lockowner_data *data = calldata;
6213 nfs4_free_lock_state(data->server, data->lsp);
6214 kfree(calldata);
6215 }
6216
6217 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6218 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6219 .rpc_call_done = nfs4_release_lockowner_done,
6220 .rpc_release = nfs4_release_lockowner_release,
6221 };
6222
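/*
 * RELEASE_LOCKOWNER is an NFSv4.0-only operation, so this helper
 * returns immediately for later minor versions.
 */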
6223 static void
6224 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6225 {
6226 struct nfs_release_lockowner_data *data;
6227 struct rpc_message msg = {
6228 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6229 };
6230
6231 if (server->nfs_client->cl_mvops->minor_version != 0)
6232 return;
6233
6234 data = kmalloc(sizeof(*data), GFP_NOFS);
6235 if (!data)
6236 return;
6237 data->lsp = lsp;
6238 data->server = server;
6239 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6240 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6241 data->args.lock_owner.s_dev = server->s_dev;
6242
6243 msg.rpc_argp = &data->args;
6244 msg.rpc_resp = &data->res;
6245 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6246 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6247 }
6248
6249 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6250
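/*
 * Extended attribute handlers that expose the NFSv4 ACL through the
 * "system.nfs4_acl" xattr.
 */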
6251 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6252 struct dentry *dentry, const char *key,
6253 const void *buf, size_t buflen,
6254 int flags)
6255 {
6256 if (strcmp(key, "") != 0)
6257 return -EINVAL;
6258
6259 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6260 }
6261
6262 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6263 struct dentry *dentry, const char *key,
6264 void *buf, size_t buflen)
6265 {
6266 if (strcmp(key, "") != 0)
6267 return -EINVAL;
6268
6269 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6270 }
6271
6272 static size_t nfs4_xattr_list_nfs4_acl(const struct xattr_handler *handler,
6273 struct dentry *dentry, char *list,
6274 size_t list_len, const char *name,
6275 size_t name_len)
6276 {
6277 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6278
6279 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6280 return 0;
6281
6282 if (list && len <= list_len)
6283 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6284 return len;
6285 }
6286
6287 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6288 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6289 {
6290 return server->caps & NFS_CAP_SECURITY_LABEL;
6291 }
6292
6293 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6294 struct dentry *dentry, const char *key,
6295 const void *buf, size_t buflen,
6296 int flags)
6297 {
6298 if (security_ismaclabel(key))
6299 return nfs4_set_security_label(dentry, buf, buflen);
6300
6301 return -EOPNOTSUPP;
6302 }
6303
6304 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6305 struct dentry *dentry, const char *key,
6306 void *buf, size_t buflen)
6307 {
6308 if (security_ismaclabel(key))
6309 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6310 return -EOPNOTSUPP;
6311 }
6312
6313 static size_t nfs4_xattr_list_nfs4_label(const struct xattr_handler *handler,
6314 struct dentry *dentry, char *list,
6315 size_t list_len, const char *name,
6316 size_t name_len)
6317 {
6318 size_t len = 0;
6319
6320 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6321 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6322 if (list && len <= list_len)
6323 security_inode_listsecurity(d_inode(dentry), list, len);
6324 }
6325 return len;
6326 }
6327
6328 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6329 .prefix = XATTR_SECURITY_PREFIX,
6330 .list = nfs4_xattr_list_nfs4_label,
6331 .get = nfs4_xattr_get_nfs4_label,
6332 .set = nfs4_xattr_set_nfs4_label,
6333 };
6334 #endif
6335
6336
6337 /*
6338 * nfs_fhget will use either the mounted_on_fileid or the fileid
6339 */
6340 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6341 {
6342 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6343 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6344 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6345 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6346 return;
6347
6348 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6349 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6350 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6351 fattr->nlink = 2;
6352 }
6353
6354 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6355 const struct qstr *name,
6356 struct nfs4_fs_locations *fs_locations,
6357 struct page *page)
6358 {
6359 struct nfs_server *server = NFS_SERVER(dir);
6360 u32 bitmask[3] = {
6361 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6362 };
6363 struct nfs4_fs_locations_arg args = {
6364 .dir_fh = NFS_FH(dir),
6365 .name = name,
6366 .page = page,
6367 .bitmask = bitmask,
6368 };
6369 struct nfs4_fs_locations_res res = {
6370 .fs_locations = fs_locations,
6371 };
6372 struct rpc_message msg = {
6373 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6374 .rpc_argp = &args,
6375 .rpc_resp = &res,
6376 };
6377 int status;
6378
6379 dprintk("%s: start\n", __func__);
6380
6381 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6382 * is not supported */
6383 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6384 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6385 else
6386 bitmask[0] |= FATTR4_WORD0_FILEID;
6387
6388 nfs_fattr_init(&fs_locations->fattr);
6389 fs_locations->server = server;
6390 fs_locations->nlocations = 0;
6391 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6392 dprintk("%s: returned status = %d\n", __func__, status);
6393 return status;
6394 }
6395
6396 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6397 const struct qstr *name,
6398 struct nfs4_fs_locations *fs_locations,
6399 struct page *page)
6400 {
6401 struct nfs4_exception exception = { };
6402 int err;
6403 do {
6404 err = _nfs4_proc_fs_locations(client, dir, name,
6405 fs_locations, page);
6406 trace_nfs4_get_fs_locations(dir, name, err);
6407 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6408 &exception);
6409 } while (exception.retry);
6410 return err;
6411 }
6412
6413 /*
6414 * This operation also signals the server that this client is
6415 * performing migration recovery. The server can stop returning
6416 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6417 * appended to this compound to identify the client ID which is
6418 * performing recovery.
6419 */
6420 static int _nfs40_proc_get_locations(struct inode *inode,
6421 struct nfs4_fs_locations *locations,
6422 struct page *page, struct rpc_cred *cred)
6423 {
6424 struct nfs_server *server = NFS_SERVER(inode);
6425 struct rpc_clnt *clnt = server->client;
6426 u32 bitmask[2] = {
6427 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6428 };
6429 struct nfs4_fs_locations_arg args = {
6430 .clientid = server->nfs_client->cl_clientid,
6431 .fh = NFS_FH(inode),
6432 .page = page,
6433 .bitmask = bitmask,
6434 .migration = 1, /* skip LOOKUP */
6435 .renew = 1, /* append RENEW */
6436 };
6437 struct nfs4_fs_locations_res res = {
6438 .fs_locations = locations,
6439 .migration = 1,
6440 .renew = 1,
6441 };
6442 struct rpc_message msg = {
6443 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6444 .rpc_argp = &args,
6445 .rpc_resp = &res,
6446 .rpc_cred = cred,
6447 };
6448 unsigned long now = jiffies;
6449 int status;
6450
6451 nfs_fattr_init(&locations->fattr);
6452 locations->server = server;
6453 locations->nlocations = 0;
6454
6455 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6456 nfs4_set_sequence_privileged(&args.seq_args);
6457 status = nfs4_call_sync_sequence(clnt, server, &msg,
6458 &args.seq_args, &res.seq_res);
6459 if (status)
6460 return status;
6461
6462 renew_lease(server, now);
6463 return 0;
6464 }
6465
6466 #ifdef CONFIG_NFS_V4_1
6467
6468 /*
6469 * This operation also signals the server that this client is
6470 * performing migration recovery. The server can stop asserting
6471 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6472 * performing this operation is identified in the SEQUENCE
6473 * operation in this compound.
6474 *
6475 * When the client supports GETATTR(fs_locations_info), it can
6476 * be plumbed in here.
6477 */
6478 static int _nfs41_proc_get_locations(struct inode *inode,
6479 struct nfs4_fs_locations *locations,
6480 struct page *page, struct rpc_cred *cred)
6481 {
6482 struct nfs_server *server = NFS_SERVER(inode);
6483 struct rpc_clnt *clnt = server->client;
6484 u32 bitmask[2] = {
6485 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6486 };
6487 struct nfs4_fs_locations_arg args = {
6488 .fh = NFS_FH(inode),
6489 .page = page,
6490 .bitmask = bitmask,
6491 .migration = 1, /* skip LOOKUP */
6492 };
6493 struct nfs4_fs_locations_res res = {
6494 .fs_locations = locations,
6495 .migration = 1,
6496 };
6497 struct rpc_message msg = {
6498 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6499 .rpc_argp = &args,
6500 .rpc_resp = &res,
6501 .rpc_cred = cred,
6502 };
6503 int status;
6504
6505 nfs_fattr_init(&locations->fattr);
6506 locations->server = server;
6507 locations->nlocations = 0;
6508
6509 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6510 nfs4_set_sequence_privileged(&args.seq_args);
6511 status = nfs4_call_sync_sequence(clnt, server, &msg,
6512 &args.seq_args, &res.seq_res);
6513 if (status == NFS4_OK &&
6514 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6515 status = -NFS4ERR_LEASE_MOVED;
6516 return status;
6517 }
6518
6519 #endif /* CONFIG_NFS_V4_1 */
6520
6521 /**
6522 * nfs4_proc_get_locations - discover locations for a migrated FSID
6523 * @inode: inode on FSID that is migrating
6524 * @locations: result of query
6525 * @page: buffer
6526 * @cred: credential to use for this operation
6527 *
6528 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6529 * operation failed, or a negative errno if a local error occurred.
6530 *
6531 * On success, "locations" is filled in, but if the server has
6532 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6533 * asserted.
6534 *
6535 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6536 * from this client that require migration recovery.
6537 */
6538 int nfs4_proc_get_locations(struct inode *inode,
6539 struct nfs4_fs_locations *locations,
6540 struct page *page, struct rpc_cred *cred)
6541 {
6542 struct nfs_server *server = NFS_SERVER(inode);
6543 struct nfs_client *clp = server->nfs_client;
6544 const struct nfs4_mig_recovery_ops *ops =
6545 clp->cl_mvops->mig_recovery_ops;
6546 struct nfs4_exception exception = { };
6547 int status;
6548
6549 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6550 (unsigned long long)server->fsid.major,
6551 (unsigned long long)server->fsid.minor,
6552 clp->cl_hostname);
6553 nfs_display_fhandle(NFS_FH(inode), __func__);
6554
6555 do {
6556 status = ops->get_locations(inode, locations, page, cred);
6557 if (status != -NFS4ERR_DELAY)
6558 break;
6559 nfs4_handle_exception(server, status, &exception);
6560 } while (exception.retry);
6561 return status;
6562 }
6563
6564 /*
6565 * This operation also signals the server that this client is
6566 * performing "lease moved" recovery. The server can stop
6567 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6568 * is appended to this compound to identify the client ID which is
6569 * performing recovery.
6570 */
6571 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6572 {
6573 struct nfs_server *server = NFS_SERVER(inode);
6574 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6575 struct rpc_clnt *clnt = server->client;
6576 struct nfs4_fsid_present_arg args = {
6577 .fh = NFS_FH(inode),
6578 .clientid = clp->cl_clientid,
6579 .renew = 1, /* append RENEW */
6580 };
6581 struct nfs4_fsid_present_res res = {
6582 .renew = 1,
6583 };
6584 struct rpc_message msg = {
6585 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6586 .rpc_argp = &args,
6587 .rpc_resp = &res,
6588 .rpc_cred = cred,
6589 };
6590 unsigned long now = jiffies;
6591 int status;
6592
6593 res.fh = nfs_alloc_fhandle();
6594 if (res.fh == NULL)
6595 return -ENOMEM;
6596
6597 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6598 nfs4_set_sequence_privileged(&args.seq_args);
6599 status = nfs4_call_sync_sequence(clnt, server, &msg,
6600 &args.seq_args, &res.seq_res);
6601 nfs_free_fhandle(res.fh);
6602 if (status)
6603 return status;
6604
6605 do_renew_lease(clp, now);
6606 return 0;
6607 }
6608
6609 #ifdef CONFIG_NFS_V4_1
6610
6611 /*
6612 * This operation also signals the server that this client is
6613 * performing "lease moved" recovery. The server can stop asserting
6614 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6615 * this operation is identified in the SEQUENCE operation in this
6616 * compound.
6617 */
6618 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6619 {
6620 struct nfs_server *server = NFS_SERVER(inode);
6621 struct rpc_clnt *clnt = server->client;
6622 struct nfs4_fsid_present_arg args = {
6623 .fh = NFS_FH(inode),
6624 };
6625 struct nfs4_fsid_present_res res = {
6626 };
6627 struct rpc_message msg = {
6628 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6629 .rpc_argp = &args,
6630 .rpc_resp = &res,
6631 .rpc_cred = cred,
6632 };
6633 int status;
6634
6635 res.fh = nfs_alloc_fhandle();
6636 if (res.fh == NULL)
6637 return -ENOMEM;
6638
6639 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6640 nfs4_set_sequence_privileged(&args.seq_args);
6641 status = nfs4_call_sync_sequence(clnt, server, &msg,
6642 &args.seq_args, &res.seq_res);
6643 nfs_free_fhandle(res.fh);
6644 if (status == NFS4_OK &&
6645 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6646 status = -NFS4ERR_LEASE_MOVED;
6647 return status;
6648 }
6649
6650 #endif /* CONFIG_NFS_V4_1 */
6651
6652 /**
6653 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6654 * @inode: inode on FSID to check
6655 * @cred: credential to use for this operation
6656 *
6657 * Server indicates whether the FSID is present, moved, or not
6658 * recognized. This operation is necessary to clear a LEASE_MOVED
6659 * condition for this client ID.
6660 *
6661 * Returns NFS4_OK if the FSID is present on this server,
6662 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6663 * NFS4ERR code if some error occurred on the server, or a
6664 * negative errno if a local failure occurred.
6665 */
6666 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6667 {
6668 struct nfs_server *server = NFS_SERVER(inode);
6669 struct nfs_client *clp = server->nfs_client;
6670 const struct nfs4_mig_recovery_ops *ops =
6671 clp->cl_mvops->mig_recovery_ops;
6672 struct nfs4_exception exception = { };
6673 int status;
6674
6675 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6676 (unsigned long long)server->fsid.major,
6677 (unsigned long long)server->fsid.minor,
6678 clp->cl_hostname);
6679 nfs_display_fhandle(NFS_FH(inode), __func__);
6680
6681 do {
6682 status = ops->fsid_present(inode, cred);
6683 if (status != -NFS4ERR_DELAY)
6684 break;
6685 nfs4_handle_exception(server, status, &exception);
6686 } while (exception.retry);
6687 return status;
6688 }
6689
6690 /**
6691 * If 'use_integrity' is true and the state management nfs_client
6692 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6693 * and the machine credential as per RFC3530bis and RFC5661 Security
6694 * Considerations sections. Otherwise, just use the user cred with the
6695 * filesystem's rpc_client.
6696 */
6697 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6698 {
6699 int status;
6700 struct nfs4_secinfo_arg args = {
6701 .dir_fh = NFS_FH(dir),
6702 .name = name,
6703 };
6704 struct nfs4_secinfo_res res = {
6705 .flavors = flavors,
6706 };
6707 struct rpc_message msg = {
6708 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6709 .rpc_argp = &args,
6710 .rpc_resp = &res,
6711 };
6712 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6713 struct rpc_cred *cred = NULL;
6714
6715 if (use_integrity) {
6716 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6717 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6718 msg.rpc_cred = cred;
6719 }
6720
6721 dprintk("NFS call secinfo %s\n", name->name);
6722
6723 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6724 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6725
6726 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6727 &res.seq_res, 0);
6728 dprintk("NFS reply secinfo: %d\n", status);
6729
6730 if (cred)
6731 put_rpccred(cred);
6732
6733 return status;
6734 }
6735
6736 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6737 struct nfs4_secinfo_flavors *flavors)
6738 {
6739 struct nfs4_exception exception = { };
6740 int err;
6741 do {
6742 err = -NFS4ERR_WRONGSEC;
6743
6744 /* try to use integrity protection with machine cred */
6745 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6746 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6747
6748 /*
6749 * if unable to use integrity protection, or SECINFO with
6750 * integrity protection returns NFS4ERR_WRONGSEC (which is
6751 * disallowed by spec, but exists in deployed servers) use
6752 * the current filesystem's rpc_client and the user cred.
6753 */
6754 if (err == -NFS4ERR_WRONGSEC)
6755 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6756
6757 trace_nfs4_secinfo(dir, name, err);
6758 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6759 &exception);
6760 } while (exception.retry);
6761 return err;
6762 }
6763
6764 #ifdef CONFIG_NFS_V4_1
6765 /*
6766 * Check the exchange flags returned by the server: reject unknown flags,
6767 * the combination of both PNFS and NON_PNFS, and replies that set none of
6768 * the NON_PNFS, PNFS, or DS flags.
6769 */
6770 static int nfs4_check_cl_exchange_flags(u32 flags)
6771 {
6772 if (flags & ~EXCHGID4_FLAG_MASK_R)
6773 goto out_inval;
6774 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6775 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6776 goto out_inval;
6777 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6778 goto out_inval;
6779 return NFS_OK;
6780 out_inval:
6781 return -NFS4ERR_INVAL;
6782 }
6783
6784 static bool
6785 nfs41_same_server_scope(struct nfs41_server_scope *a,
6786 struct nfs41_server_scope *b)
6787 {
6788 if (a->server_scope_sz == b->server_scope_sz &&
6789 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6790 return true;
6791
6792 return false;
6793 }
6794
6795 /*
6796 * nfs4_proc_bind_conn_to_session()
6797 *
6798 * The 4.1 client currently uses the same TCP connection for the
6799 * fore and backchannel.
6800 */
6801 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6802 {
6803 int status;
6804 struct nfs41_bind_conn_to_session_args args = {
6805 .client = clp,
6806 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6807 };
6808 struct nfs41_bind_conn_to_session_res res;
6809 struct rpc_message msg = {
6810 .rpc_proc =
6811 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6812 .rpc_argp = &args,
6813 .rpc_resp = &res,
6814 .rpc_cred = cred,
6815 };
6816
6817 dprintk("--> %s\n", __func__);
6818
6819 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6820 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6821 args.dir = NFS4_CDFC4_FORE;
6822
6823 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6824 trace_nfs4_bind_conn_to_session(clp, status);
6825 if (status == 0) {
6826 if (memcmp(res.sessionid.data,
6827 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6828 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6829 status = -EIO;
6830 goto out;
6831 }
6832 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6833 dprintk("NFS: %s: Unexpected direction from server\n",
6834 __func__);
6835 status = -EIO;
6836 goto out;
6837 }
6838 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6839 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6840 __func__);
6841 status = -EIO;
6842 goto out;
6843 }
6844 }
6845 out:
6846 dprintk("<-- %s status= %d\n", __func__, status);
6847 return status;
6848 }
6849
6850 /*
6851 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
6852 * plus operations we'd like to see in the allow map to enable certain features
6853 */
6854 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6855 .how = SP4_MACH_CRED,
6856 .enforce.u.words = {
6857 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6858 1 << (OP_EXCHANGE_ID - 32) |
6859 1 << (OP_CREATE_SESSION - 32) |
6860 1 << (OP_DESTROY_SESSION - 32) |
6861 1 << (OP_DESTROY_CLIENTID - 32)
6862 },
6863 .allow.u.words = {
6864 [0] = 1 << (OP_CLOSE) |
6865 1 << (OP_LOCKU) |
6866 1 << (OP_COMMIT),
6867 [1] = 1 << (OP_SECINFO - 32) |
6868 1 << (OP_SECINFO_NO_NAME - 32) |
6869 1 << (OP_TEST_STATEID - 32) |
6870 1 << (OP_FREE_STATEID - 32) |
6871 1 << (OP_WRITE - 32)
6872 }
6873 };
6874
6875 /*
6876 * Select the state protection mode for client `clp' given the server results
6877 * from exchange_id in `sp'.
6878 *
6879 * Returns 0 on success, negative errno otherwise.
6880 */
6881 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6882 struct nfs41_state_protection *sp)
6883 {
6884 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6885 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6886 1 << (OP_EXCHANGE_ID - 32) |
6887 1 << (OP_CREATE_SESSION - 32) |
6888 1 << (OP_DESTROY_SESSION - 32) |
6889 1 << (OP_DESTROY_CLIENTID - 32)
6890 };
6891 unsigned int i;
6892
6893 if (sp->how == SP4_MACH_CRED) {
6894 /* Print state protect result */
6895 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6896 for (i = 0; i <= LAST_NFS4_OP; i++) {
6897 if (test_bit(i, sp->enforce.u.longs))
6898 dfprintk(MOUNT, " enforce op %d\n", i);
6899 if (test_bit(i, sp->allow.u.longs))
6900 dfprintk(MOUNT, " allow op %d\n", i);
6901 }
6902
6903 /* make sure nothing is on enforce list that isn't supported */
6904 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6905 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6906 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6907 return -EINVAL;
6908 }
6909 }
6910
6911 /*
6912 * Minimal mode - state operations are allowed to use machine
6913 * credential. Note this already happens by default, so the
6914 * client doesn't have to do anything more than the negotiation.
6915 *
6916 * NOTE: we don't care if EXCHANGE_ID is in the list -
6917 * we're already using the machine cred for exchange_id
6918 * and will never use a different cred.
6919 */
6920 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6921 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6922 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6923 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6924 dfprintk(MOUNT, "sp4_mach_cred:\n");
6925 dfprintk(MOUNT, " minimal mode enabled\n");
6926 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6927 } else {
6928 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6929 return -EINVAL;
6930 }
6931
6932 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6933 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6934 dfprintk(MOUNT, " cleanup mode enabled\n");
6935 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6936 }
6937
6938 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6939 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6940 dfprintk(MOUNT, " secinfo mode enabled\n");
6941 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6942 }
6943
6944 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6945 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6946 dfprintk(MOUNT, " stateid mode enabled\n");
6947 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6948 }
6949
6950 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6951 dfprintk(MOUNT, " write mode enabled\n");
6952 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6953 }
6954
6955 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6956 dfprintk(MOUNT, " commit mode enabled\n");
6957 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6958 }
6959 }
6960
6961 return 0;
6962 }
6963
6964 /*
6965 * _nfs4_proc_exchange_id()
6966 *
6967 * Wrapper for EXCHANGE_ID operation.
6968 */
6969 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6970 u32 sp4_how)
6971 {
6972 nfs4_verifier verifier;
6973 struct nfs41_exchange_id_args args = {
6974 .verifier = &verifier,
6975 .client = clp,
6976 #ifdef CONFIG_NFS_V4_1_MIGRATION
6977 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6978 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6979 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6980 #else
6981 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6982 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6983 #endif
6984 };
6985 struct nfs41_exchange_id_res res = {
6986 0
6987 };
6988 int status;
6989 struct rpc_message msg = {
6990 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6991 .rpc_argp = &args,
6992 .rpc_resp = &res,
6993 .rpc_cred = cred,
6994 };
6995
6996 nfs4_init_boot_verifier(clp, &verifier);
6997
6998 status = nfs4_init_uniform_client_string(clp);
6999 if (status)
7000 goto out;
7001
7002 dprintk("NFS call exchange_id auth=%s, '%s'\n",
7003 clp->cl_rpcclient->cl_auth->au_ops->au_name,
7004 clp->cl_owner_id);
7005
7006 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7007 GFP_NOFS);
7008 if (unlikely(res.server_owner == NULL)) {
7009 status = -ENOMEM;
7010 goto out;
7011 }
7012
7013 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7014 GFP_NOFS);
7015 if (unlikely(res.server_scope == NULL)) {
7016 status = -ENOMEM;
7017 goto out_server_owner;
7018 }
7019
7020 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7021 if (unlikely(res.impl_id == NULL)) {
7022 status = -ENOMEM;
7023 goto out_server_scope;
7024 }
7025
7026 switch (sp4_how) {
7027 case SP4_NONE:
7028 args.state_protect.how = SP4_NONE;
7029 break;
7030
7031 case SP4_MACH_CRED:
7032 args.state_protect = nfs4_sp4_mach_cred_request;
7033 break;
7034
7035 default:
7036 /* unsupported! */
7037 WARN_ON_ONCE(1);
7038 status = -EINVAL;
7039 goto out_impl_id;
7040 }
7041
7042 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7043 trace_nfs4_exchange_id(clp, status);
7044 if (status == 0)
7045 status = nfs4_check_cl_exchange_flags(res.flags);
7046
7047 if (status == 0)
7048 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7049
7050 if (status == 0) {
7051 clp->cl_clientid = res.clientid;
7052 clp->cl_exchange_flags = res.flags;
7053 /* Client ID is not confirmed */
7054 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7055 clear_bit(NFS4_SESSION_ESTABLISHED,
7056 &clp->cl_session->session_state);
7057 clp->cl_seqid = res.seqid;
7058 }
7059
7060 kfree(clp->cl_serverowner);
7061 clp->cl_serverowner = res.server_owner;
7062 res.server_owner = NULL;
7063
7064 /* use the most recent implementation id */
7065 kfree(clp->cl_implid);
7066 clp->cl_implid = res.impl_id;
7067 res.impl_id = NULL;
7068
7069 if (clp->cl_serverscope != NULL &&
7070 !nfs41_same_server_scope(clp->cl_serverscope,
7071 res.server_scope)) {
7072 dprintk("%s: server_scope mismatch detected\n",
7073 __func__);
7074 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7075 kfree(clp->cl_serverscope);
7076 clp->cl_serverscope = NULL;
7077 }
7078
7079 if (clp->cl_serverscope == NULL) {
7080 clp->cl_serverscope = res.server_scope;
7081 res.server_scope = NULL;
7082 }
7083 }
7084
7085 out_impl_id:
7086 kfree(res.impl_id);
7087 out_server_scope:
7088 kfree(res.server_scope);
7089 out_server_owner:
7090 kfree(res.server_owner);
7091 out:
7092 if (clp->cl_implid != NULL)
7093 dprintk("NFS reply exchange_id: Server Implementation ID: "
7094 "domain: %s, name: %s, date: %llu,%u\n",
7095 clp->cl_implid->domain, clp->cl_implid->name,
7096 clp->cl_implid->date.seconds,
7097 clp->cl_implid->date.nseconds);
7098 dprintk("NFS reply exchange_id: %d\n", status);
7099 return status;
7100 }
7101
7102 /*
7103 * nfs4_proc_exchange_id()
7104 *
7105 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7106 *
7107 * Since the clientid has expired, all compounds using sessions
7108 * associated with the stale clientid will be returning
7109 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7110 * be in some phase of session reset.
7111 *
7112 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7113 */
7114 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7115 {
7116 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7117 int status;
7118
7119 /* try SP4_MACH_CRED if krb5i/p */
7120 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7121 authflavor == RPC_AUTH_GSS_KRB5P) {
7122 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7123 if (!status)
7124 return 0;
7125 }
7126
7127 /* try SP4_NONE */
7128 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7129 }
7130
7131 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7132 struct rpc_cred *cred)
7133 {
7134 struct rpc_message msg = {
7135 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7136 .rpc_argp = clp,
7137 .rpc_cred = cred,
7138 };
7139 int status;
7140
7141 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7142 trace_nfs4_destroy_clientid(clp, status);
7143 if (status)
7144 dprintk("NFS: Got error %d from the server %s on "
7145 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7146 return status;
7147 }
7148
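/*
 * Retry DESTROY_CLIENTID for up to NFS4_MAX_LOOP_ON_RECOVER attempts
 * while the server keeps answering NFS4ERR_DELAY or
 * NFS4ERR_CLIENTID_BUSY, sleeping one second between attempts.
 */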
7149 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7150 struct rpc_cred *cred)
7151 {
7152 unsigned int loop;
7153 int ret;
7154
7155 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7156 ret = _nfs4_proc_destroy_clientid(clp, cred);
7157 switch (ret) {
7158 case -NFS4ERR_DELAY:
7159 case -NFS4ERR_CLIENTID_BUSY:
7160 ssleep(1);
7161 break;
7162 default:
7163 return ret;
7164 }
7165 }
7166 return 0;
7167 }
7168
7169 int nfs4_destroy_clientid(struct nfs_client *clp)
7170 {
7171 struct rpc_cred *cred;
7172 int ret = 0;
7173
7174 if (clp->cl_mvops->minor_version < 1)
7175 goto out;
7176 if (clp->cl_exchange_flags == 0)
7177 goto out;
7178 if (clp->cl_preserve_clid)
7179 goto out;
7180 cred = nfs4_get_clid_cred(clp);
7181 ret = nfs4_proc_destroy_clientid(clp, cred);
7182 if (cred)
7183 put_rpccred(cred);
7184 switch (ret) {
7185 case 0:
7186 case -NFS4ERR_STALE_CLIENTID:
7187 clp->cl_exchange_flags = 0;
7188 }
7189 out:
7190 return ret;
7191 }
7192
7193 struct nfs4_get_lease_time_data {
7194 struct nfs4_get_lease_time_args *args;
7195 struct nfs4_get_lease_time_res *res;
7196 struct nfs_client *clp;
7197 };
7198
7199 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7200 void *calldata)
7201 {
7202 struct nfs4_get_lease_time_data *data =
7203 (struct nfs4_get_lease_time_data *)calldata;
7204
7205 dprintk("--> %s\n", __func__);
7206 /* just set up the sequence; do not trigger session recovery,
7207 since we're invoked from within one */
7208 nfs41_setup_sequence(data->clp->cl_session,
7209 &data->args->la_seq_args,
7210 &data->res->lr_seq_res,
7211 task);
7212 dprintk("<-- %s\n", __func__);
7213 }
7214
7215 /*
7216 * Called from nfs4_state_manager thread for session setup, so don't recover
7217 * from sequence operation or clientid errors.
7218 */
7219 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7220 {
7221 struct nfs4_get_lease_time_data *data =
7222 (struct nfs4_get_lease_time_data *)calldata;
7223
7224 dprintk("--> %s\n", __func__);
7225 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7226 return;
7227 switch (task->tk_status) {
7228 case -NFS4ERR_DELAY:
7229 case -NFS4ERR_GRACE:
7230 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7231 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7232 task->tk_status = 0;
7233 /* fall through */
7234 case -NFS4ERR_RETRY_UNCACHED_REP:
7235 rpc_restart_call_prepare(task);
7236 return;
7237 }
7238 dprintk("<-- %s\n", __func__);
7239 }
7240
7241 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7242 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7243 .rpc_call_done = nfs4_get_lease_time_done,
7244 };
7245
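/*
 * Fetch the server's lease time during session establishment.  The
 * request is marked privileged so that it can be sent while the
 * session is still being set up.
 */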
7246 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7247 {
7248 struct rpc_task *task;
7249 struct nfs4_get_lease_time_args args;
7250 struct nfs4_get_lease_time_res res = {
7251 .lr_fsinfo = fsinfo,
7252 };
7253 struct nfs4_get_lease_time_data data = {
7254 .args = &args,
7255 .res = &res,
7256 .clp = clp,
7257 };
7258 struct rpc_message msg = {
7259 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7260 .rpc_argp = &args,
7261 .rpc_resp = &res,
7262 };
7263 struct rpc_task_setup task_setup = {
7264 .rpc_client = clp->cl_rpcclient,
7265 .rpc_message = &msg,
7266 .callback_ops = &nfs4_get_lease_time_ops,
7267 .callback_data = &data,
7268 .flags = RPC_TASK_TIMEOUT,
7269 };
7270 int status;
7271
7272 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7273 nfs4_set_sequence_privileged(&args.la_seq_args);
7274 dprintk("--> %s\n", __func__);
7275 task = rpc_run_task(&task_setup);
7276
7277 if (IS_ERR(task))
7278 status = PTR_ERR(task);
7279 else {
7280 status = task->tk_status;
7281 rpc_put_task(task);
7282 }
7283 dprintk("<-- %s return %d\n", __func__, status);
7284
7285 return status;
7286 }
7287
7288 /*
7289 * Initialize the values to be used by the client in CREATE_SESSION.
7290 * If nfs4_init_session set the fore channel request and response sizes,
7291 * use them.
7292 *
7293 * Set the back channel max_resp_sz_cached to zero to force the client to
7294 * always set csa_cachethis to FALSE because the current implementation
7295 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7296 */
7297 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7298 {
7299 unsigned int max_rqst_sz, max_resp_sz;
7300
7301 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7302 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7303
7304 /* Fore channel attributes */
7305 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7306 args->fc_attrs.max_resp_sz = max_resp_sz;
7307 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7308 args->fc_attrs.max_reqs = max_session_slots;
7309
7310 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7311 "max_ops=%u max_reqs=%u\n",
7312 __func__,
7313 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7314 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7315
7316 /* Back channel attributes */
7317 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7318 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7319 args->bc_attrs.max_resp_sz_cached = 0;
7320 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7321 args->bc_attrs.max_reqs = 1;
7322
7323 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7324 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7325 __func__,
7326 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7327 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7328 args->bc_attrs.max_reqs);
7329 }
7330
7331 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7332 struct nfs41_create_session_res *res)
7333 {
7334 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7335 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7336
7337 if (rcvd->max_resp_sz > sent->max_resp_sz)
7338 return -EINVAL;
7339 /*
7340 * Our requested max_ops is the minimum we need; we're not
7341 * prepared to break up compounds into smaller pieces than that.
7342 * So, no point even trying to continue if the server won't
7343 * cooperate:
7344 */
7345 if (rcvd->max_ops < sent->max_ops)
7346 return -EINVAL;
7347 if (rcvd->max_reqs == 0)
7348 return -EINVAL;
7349 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7350 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7351 return 0;
7352 }
7353
7354 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7355 struct nfs41_create_session_res *res)
7356 {
7357 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7358 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7359
7360 if (!(res->flags & SESSION4_BACK_CHAN))
7361 goto out;
7362 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7363 return -EINVAL;
7364 if (rcvd->max_resp_sz < sent->max_resp_sz)
7365 return -EINVAL;
7366 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7367 return -EINVAL;
7368 /* These would render the backchannel useless: */
7369 if (rcvd->max_ops != sent->max_ops)
7370 return -EINVAL;
7371 if (rcvd->max_reqs != sent->max_reqs)
7372 return -EINVAL;
7373 out:
7374 return 0;
7375 }
7376
7377 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7378 struct nfs41_create_session_res *res)
7379 {
7380 int ret;
7381
7382 ret = nfs4_verify_fore_channel_attrs(args, res);
7383 if (ret)
7384 return ret;
7385 return nfs4_verify_back_channel_attrs(args, res);
7386 }
7387
7388 static void nfs4_update_session(struct nfs4_session *session,
7389 struct nfs41_create_session_res *res)
7390 {
7391 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7392 /* Mark client id and session as being confirmed */
7393 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7394 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7395 session->flags = res->flags;
7396 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7397 if (res->flags & SESSION4_BACK_CHAN)
7398 memcpy(&session->bc_attrs, &res->bc_attrs,
7399 sizeof(session->bc_attrs));
7400 }
7401
7402 static int _nfs4_proc_create_session(struct nfs_client *clp,
7403 struct rpc_cred *cred)
7404 {
7405 struct nfs4_session *session = clp->cl_session;
7406 struct nfs41_create_session_args args = {
7407 .client = clp,
7408 .clientid = clp->cl_clientid,
7409 .seqid = clp->cl_seqid,
7410 .cb_program = NFS4_CALLBACK,
7411 };
7412 struct nfs41_create_session_res res;
7413
7414 struct rpc_message msg = {
7415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7416 .rpc_argp = &args,
7417 .rpc_resp = &res,
7418 .rpc_cred = cred,
7419 };
7420 int status;
7421
7422 nfs4_init_channel_attrs(&args);
7423 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7424
7425 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7426 trace_nfs4_create_session(clp, status);
7427
7428 if (!status) {
7429 /* Verify the session's negotiated channel_attrs values */
7430 status = nfs4_verify_channel_attrs(&args, &res);
7431 /* Increment the clientid slot sequence id */
7432 if (clp->cl_seqid == res.seqid)
7433 clp->cl_seqid++;
7434 if (status)
7435 goto out;
7436 nfs4_update_session(session, &res);
7437 }
7438 out:
7439 return status;
7440 }
7441
7442 /*
7443 * Issues a CREATE_SESSION operation to the server.
7444 * It is the responsibility of the caller to verify the session is
7445 * expired before calling this routine.
7446 */
7447 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7448 {
7449 int status;
7450 unsigned *ptr;
7451 struct nfs4_session *session = clp->cl_session;
7452
7453 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7454
7455 status = _nfs4_proc_create_session(clp, cred);
7456 if (status)
7457 goto out;
7458
7459 /* Init or reset the session slot tables */
7460 status = nfs4_setup_session_slot_tables(session);
7461 dprintk("slot table setup returned %d\n", status);
7462 if (status)
7463 goto out;
7464
7465 ptr = (unsigned *)&session->sess_id.data[0];
7466 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
7467 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7468 out:
7469 dprintk("<-- %s\n", __func__);
7470 return status;
7471 }
7472
7473 /*
7474 * Issue the over-the-wire RPC DESTROY_SESSION.
7475 * The caller must serialize access to this routine.
7476 */
7477 int nfs4_proc_destroy_session(struct nfs4_session *session,
7478 struct rpc_cred *cred)
7479 {
7480 struct rpc_message msg = {
7481 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7482 .rpc_argp = session,
7483 .rpc_cred = cred,
7484 };
7485 int status = 0;
7486
7487 dprintk("--> nfs4_proc_destroy_session\n");
7488
7489 /* session is still being set up */
7490 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7491 return 0;
7492
7493 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7494 trace_nfs4_destroy_session(session->clp, status);
7495
7496 if (status)
7497 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7498 "Session has been destroyed regardless...\n", status);
7499
7500 dprintk("<-- nfs4_proc_destroy_session\n");
7501 return status;
7502 }
7503
7504 /*
7505 * Renew the cl_session lease.
7506 */
7507 struct nfs4_sequence_data {
7508 struct nfs_client *clp;
7509 struct nfs4_sequence_args args;
7510 struct nfs4_sequence_res res;
7511 };
7512
7513 static void nfs41_sequence_release(void *data)
7514 {
7515 struct nfs4_sequence_data *calldata = data;
7516 struct nfs_client *clp = calldata->clp;
7517
7518 if (atomic_read(&clp->cl_count) > 1)
7519 nfs4_schedule_state_renewal(clp);
7520 nfs_put_client(clp);
7521 kfree(calldata);
7522 }
7523
7524 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7525 {
7526 switch(task->tk_status) {
7527 case -NFS4ERR_DELAY:
7528 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7529 return -EAGAIN;
7530 default:
7531 nfs4_schedule_lease_recovery(clp);
7532 }
7533 return 0;
7534 }
7535
7536 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7537 {
7538 struct nfs4_sequence_data *calldata = data;
7539 struct nfs_client *clp = calldata->clp;
7540
7541 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7542 return;
7543
7544 trace_nfs4_sequence(clp, task->tk_status);
7545 if (task->tk_status < 0) {
7546 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7547 if (atomic_read(&clp->cl_count) == 1)
7548 goto out;
7549
7550 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7551 rpc_restart_call_prepare(task);
7552 return;
7553 }
7554 }
7555 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7556 out:
7557 dprintk("<-- %s\n", __func__);
7558 }
7559
7560 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7561 {
7562 struct nfs4_sequence_data *calldata = data;
7563 struct nfs_client *clp = calldata->clp;
7564 struct nfs4_sequence_args *args;
7565 struct nfs4_sequence_res *res;
7566
7567 args = task->tk_msg.rpc_argp;
7568 res = task->tk_msg.rpc_resp;
7569
7570 nfs41_setup_sequence(clp->cl_session, args, res, task);
7571 }
7572
7573 static const struct rpc_call_ops nfs41_sequence_ops = {
7574 .rpc_call_done = nfs41_sequence_call_done,
7575 .rpc_call_prepare = nfs41_sequence_prepare,
7576 .rpc_release = nfs41_sequence_release,
7577 };
7578
7579 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7580 struct rpc_cred *cred,
7581 bool is_privileged)
7582 {
7583 struct nfs4_sequence_data *calldata;
7584 struct rpc_message msg = {
7585 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7586 .rpc_cred = cred,
7587 };
7588 struct rpc_task_setup task_setup_data = {
7589 .rpc_client = clp->cl_rpcclient,
7590 .rpc_message = &msg,
7591 .callback_ops = &nfs41_sequence_ops,
7592 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7593 };
7594
7595 if (!atomic_inc_not_zero(&clp->cl_count))
7596 return ERR_PTR(-EIO);
7597 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7598 if (calldata == NULL) {
7599 nfs_put_client(clp);
7600 return ERR_PTR(-ENOMEM);
7601 }
7602 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7603 if (is_privileged)
7604 nfs4_set_sequence_privileged(&calldata->args);
7605 msg.rpc_argp = &calldata->args;
7606 msg.rpc_resp = &calldata->res;
7607 calldata->clp = clp;
7608 task_setup_data.callback_data = calldata;
7609
7610 return rpc_run_task(&task_setup_data);
7611 }
7612
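/*
 * Fire-and-forget SEQUENCE used for lease renewal (this is the v4.1
 * ->sched_state_renewal method): it is only issued when the renewal timeout
 * has actually expired, and the task is released asynchronously rather than
 * waited on.
 */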
7613 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7614 {
7615 struct rpc_task *task;
7616 int ret = 0;
7617
7618 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7619 return -EAGAIN;
7620 task = _nfs41_proc_sequence(clp, cred, false);
7621 if (IS_ERR(task))
7622 ret = PTR_ERR(task);
7623 else
7624 rpc_put_task_async(task);
7625 dprintk("<-- %s status=%d\n", __func__, ret);
7626 return ret;
7627 }
7628
7629 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7630 {
7631 struct rpc_task *task;
7632 int ret;
7633
7634 task = _nfs41_proc_sequence(clp, cred, true);
7635 if (IS_ERR(task)) {
7636 ret = PTR_ERR(task);
7637 goto out;
7638 }
7639 ret = rpc_wait_for_completion_task(task);
7640 if (!ret)
7641 ret = task->tk_status;
7642 rpc_put_task(task);
7643 out:
7644 dprintk("<-- %s status=%d\n", __func__, ret);
7645 return ret;
7646 }
7647
7648 struct nfs4_reclaim_complete_data {
7649 struct nfs_client *clp;
7650 struct nfs41_reclaim_complete_args arg;
7651 struct nfs41_reclaim_complete_res res;
7652 };
7653
7654 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7655 {
7656 struct nfs4_reclaim_complete_data *calldata = data;
7657
7658 nfs41_setup_sequence(calldata->clp->cl_session,
7659 &calldata->arg.seq_args,
7660 &calldata->res.seq_res,
7661 task);
7662 }
7663
7664 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7665 {
7666 switch(task->tk_status) {
7667 case 0:
7668 case -NFS4ERR_COMPLETE_ALREADY:
7669 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7670 break;
7671 case -NFS4ERR_DELAY:
7672 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7673 /* fall through */
7674 case -NFS4ERR_RETRY_UNCACHED_REP:
7675 return -EAGAIN;
7676 default:
7677 nfs4_schedule_lease_recovery(clp);
7678 }
7679 return 0;
7680 }
7681
7682 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7683 {
7684 struct nfs4_reclaim_complete_data *calldata = data;
7685 struct nfs_client *clp = calldata->clp;
7686 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7687
7688 dprintk("--> %s\n", __func__);
7689 if (!nfs41_sequence_done(task, res))
7690 return;
7691
7692 trace_nfs4_reclaim_complete(clp, task->tk_status);
7693 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7694 rpc_restart_call_prepare(task);
7695 return;
7696 }
7697 dprintk("<-- %s\n", __func__);
7698 }
7699
7700 static void nfs4_free_reclaim_complete_data(void *data)
7701 {
7702 struct nfs4_reclaim_complete_data *calldata = data;
7703
7704 kfree(calldata);
7705 }
7706
7707 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7708 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7709 .rpc_call_done = nfs4_reclaim_complete_done,
7710 .rpc_release = nfs4_free_reclaim_complete_data,
7711 };
7712
7713 /*
7714 * Issue a global reclaim complete.
7715 */
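/*
 * rca_one_fs (arg.one_fs) is always zero here, i.e. we tell the server that
 * reclaim is complete for the whole client rather than a single filesystem,
 * and the call uses a privileged sequence slot so it can run while state
 * recovery is still draining the session.
 */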
7716 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7717 struct rpc_cred *cred)
7718 {
7719 struct nfs4_reclaim_complete_data *calldata;
7720 struct rpc_task *task;
7721 struct rpc_message msg = {
7722 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7723 .rpc_cred = cred,
7724 };
7725 struct rpc_task_setup task_setup_data = {
7726 .rpc_client = clp->cl_rpcclient,
7727 .rpc_message = &msg,
7728 .callback_ops = &nfs4_reclaim_complete_call_ops,
7729 .flags = RPC_TASK_ASYNC,
7730 };
7731 int status = -ENOMEM;
7732
7733 dprintk("--> %s\n", __func__);
7734 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7735 if (calldata == NULL)
7736 goto out;
7737 calldata->clp = clp;
7738 calldata->arg.one_fs = 0;
7739
7740 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7741 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7742 msg.rpc_argp = &calldata->arg;
7743 msg.rpc_resp = &calldata->res;
7744 task_setup_data.callback_data = calldata;
7745 task = rpc_run_task(&task_setup_data);
7746 if (IS_ERR(task)) {
7747 status = PTR_ERR(task);
7748 goto out;
7749 }
7750 status = nfs4_wait_for_completion_rpc_task(task);
7751 if (status == 0)
7752 status = task->tk_status;
7753 rpc_put_task(task);
7754 return 0;
7755 out:
7756 dprintk("<-- %s status=%d\n", __func__, status);
7757 return status;
7758 }
7759
7760 static void
7761 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7762 {
7763 struct nfs4_layoutget *lgp = calldata;
7764 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7765 struct nfs4_session *session = nfs4_get_session(server);
7766
7767 dprintk("--> %s\n", __func__);
7768 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
7769 * right now covering the LAYOUTGET we are about to send.
7770 * However, that is not so catastrophic, and there seems
7771 * to be no way to prevent it completely.
7772 */
7773 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7774 &lgp->res.seq_res, task))
7775 return;
7776 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7777 NFS_I(lgp->args.inode)->layout,
7778 &lgp->args.range,
7779 lgp->args.ctx->state)) {
7780 rpc_exit(task, NFS4_OK);
7781 }
7782 }
7783
7784 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7785 {
7786 struct nfs4_layoutget *lgp = calldata;
7787 struct inode *inode = lgp->args.inode;
7788 struct nfs_server *server = NFS_SERVER(inode);
7789 struct pnfs_layout_hdr *lo;
7790 struct nfs4_state *state = NULL;
7791 unsigned long timeo, now, giveup;
7792
7793 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7794
7795 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7796 goto out;
7797
7798 switch (task->tk_status) {
7799 case 0:
7800 goto out;
7801 /*
7802 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of at least
7803 * lgp->args.minlength (!= 0) bytes (see RFC5661 section 18.43.3).
7804 */
7805 case -NFS4ERR_BADLAYOUT:
7806 goto out_overflow;
7807 /*
7808 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7809 * (or clients) writing to the same RAID stripe, except when
7810 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7811 */
7812 case -NFS4ERR_LAYOUTTRYLATER:
7813 if (lgp->args.minlength == 0)
7814 goto out_overflow;
7815 /*
7816 * NFS4ERR_RECALLCONFLICT means a conflict with ourselves (the existing
7817 * layout must be recalled before a new one can be granted).
7818 */
7819 case -NFS4ERR_RECALLCONFLICT:
7820 timeo = rpc_get_timeout(task->tk_client);
7821 giveup = lgp->args.timestamp + timeo;
7822 now = jiffies;
7823 if (time_after(giveup, now)) {
7824 unsigned long delay;
7825
7826 /* Delay for:
7827 * - Not less than NFS4_POLL_RETRY_MIN.
7828 * - No later than one jiffy before we give up.
7829 * - Exponential backoff (now minus the start of the attempt).
7830 */
7831 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7832 min((giveup - now - 1),
7833 now - lgp->args.timestamp));
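/*
 * Worked example with hypothetical numbers: NFS4_POLL_RETRY_MIN is HZ/10;
 * if the RPC timeout is 60s and 2s have passed since the first attempt,
 * delay = max(HZ/10, min(~58s, 2s)) = 2s. Each retry roughly doubles the
 * wait until the give-up point is reached.
 */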
7834
7835 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7836 __func__, delay);
7837 rpc_delay(task, delay);
7838 /* Do not call nfs4_async_handle_error() */
7839 goto out_restart;
7840 }
7841 break;
7842 case -NFS4ERR_EXPIRED:
7843 case -NFS4ERR_BAD_STATEID:
7844 spin_lock(&inode->i_lock);
7845 if (nfs4_stateid_match(&lgp->args.stateid,
7846 &lgp->args.ctx->state->stateid)) {
7847 spin_unlock(&inode->i_lock);
7848 /* If the open stateid was bad, then recover it. */
7849 state = lgp->args.ctx->state;
7850 break;
7851 }
7852 lo = NFS_I(inode)->layout;
7853 if (lo && nfs4_stateid_match(&lgp->args.stateid,
7854 &lo->plh_stateid)) {
7855 LIST_HEAD(head);
7856
7857 /*
7858 * Mark the bad layout state as invalid, then retry
7859 * with the current stateid.
7860 */
7861 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7862 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7863 spin_unlock(&inode->i_lock);
7864 pnfs_free_lseg_list(&head);
7865 } else
7866 spin_unlock(&inode->i_lock);
7867 goto out_restart;
7868 }
7869 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7870 goto out_restart;
7871 out:
7872 dprintk("<-- %s\n", __func__);
7873 return;
7874 out_restart:
7875 task->tk_status = 0;
7876 rpc_restart_call_prepare(task);
7877 return;
7878 out_overflow:
7879 task->tk_status = -EOVERFLOW;
7880 goto out;
7881 }
7882
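/*
 * Number of pages needed to hold the largest reply the session's fore
 * channel allows (fc_attrs.max_resp_sz); used to size the LAYOUTGET reply
 * buffer below.
 */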
7883 static size_t max_response_pages(struct nfs_server *server)
7884 {
7885 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7886 return nfs_page_array_len(0, max_resp_sz);
7887 }
7888
7889 static void nfs4_free_pages(struct page **pages, size_t size)
7890 {
7891 int i;
7892
7893 if (!pages)
7894 return;
7895
7896 for (i = 0; i < size; i++) {
7897 if (!pages[i])
7898 break;
7899 __free_page(pages[i]);
7900 }
7901 kfree(pages);
7902 }
7903
7904 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7905 {
7906 struct page **pages;
7907 int i;
7908
7909 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7910 if (!pages) {
7911 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7912 return NULL;
7913 }
7914
7915 for (i = 0; i < size; i++) {
7916 pages[i] = alloc_page(gfp_flags);
7917 if (!pages[i]) {
7918 dprintk("%s: failed to allocate page\n", __func__);
7919 nfs4_free_pages(pages, size);
7920 return NULL;
7921 }
7922 }
7923
7924 return pages;
7925 }
7926
7927 static void nfs4_layoutget_release(void *calldata)
7928 {
7929 struct nfs4_layoutget *lgp = calldata;
7930 struct inode *inode = lgp->args.inode;
7931 struct nfs_server *server = NFS_SERVER(inode);
7932 size_t max_pages = max_response_pages(server);
7933
7934 dprintk("--> %s\n", __func__);
7935 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7936 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7937 put_nfs_open_context(lgp->args.ctx);
7938 kfree(calldata);
7939 dprintk("<-- %s\n", __func__);
7940 }
7941
7942 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7943 .rpc_call_prepare = nfs4_layoutget_prepare,
7944 .rpc_call_done = nfs4_layoutget_done,
7945 .rpc_release = nfs4_layoutget_release,
7946 };
7947
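/*
 * Issue LAYOUTGET, synchronously from the caller's point of view: allocate
 * enough reply pages for the largest layout the session allows, run the RPC
 * as an async task, wait for it, and on success hand the opaque layout body
 * to pnfs_layout_process() to be turned into a layout segment.
 */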
7948 struct pnfs_layout_segment *
7949 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7950 {
7951 struct inode *inode = lgp->args.inode;
7952 struct nfs_server *server = NFS_SERVER(inode);
7953 size_t max_pages = max_response_pages(server);
7954 struct rpc_task *task;
7955 struct rpc_message msg = {
7956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7957 .rpc_argp = &lgp->args,
7958 .rpc_resp = &lgp->res,
7959 .rpc_cred = lgp->cred,
7960 };
7961 struct rpc_task_setup task_setup_data = {
7962 .rpc_client = server->client,
7963 .rpc_message = &msg,
7964 .callback_ops = &nfs4_layoutget_call_ops,
7965 .callback_data = lgp,
7966 .flags = RPC_TASK_ASYNC,
7967 };
7968 struct pnfs_layout_segment *lseg = NULL;
7969 int status = 0;
7970
7971 dprintk("--> %s\n", __func__);
7972
7973 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7974 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7975
7976 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7977 if (!lgp->args.layout.pages) {
7978 nfs4_layoutget_release(lgp);
7979 return ERR_PTR(-ENOMEM);
7980 }
7981 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7982 lgp->args.timestamp = jiffies;
7983
7984 lgp->res.layoutp = &lgp->args.layout;
7985 lgp->res.seq_res.sr_slot = NULL;
7986 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7987
7988 task = rpc_run_task(&task_setup_data);
7989 if (IS_ERR(task))
7990 return ERR_CAST(task);
7991 status = nfs4_wait_for_completion_rpc_task(task);
7992 if (status == 0)
7993 status = task->tk_status;
7994 trace_nfs4_layoutget(lgp->args.ctx,
7995 &lgp->args.range,
7996 &lgp->res.range,
7997 status);
7998 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7999 if (status == 0 && lgp->res.layoutp->len)
8000 lseg = pnfs_layout_process(lgp);
8001 rpc_put_task(task);
8002 dprintk("<-- %s status=%d\n", __func__, status);
8003 if (status)
8004 return ERR_PTR(status);
8005 return lseg;
8006 }
8007
8008 static void
8009 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8010 {
8011 struct nfs4_layoutreturn *lrp = calldata;
8012
8013 dprintk("--> %s\n", __func__);
8014 nfs41_setup_sequence(lrp->clp->cl_session,
8015 &lrp->args.seq_args,
8016 &lrp->res.seq_res,
8017 task);
8018 }
8019
8020 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8021 {
8022 struct nfs4_layoutreturn *lrp = calldata;
8023 struct nfs_server *server;
8024
8025 dprintk("--> %s\n", __func__);
8026
8027 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
8028 return;
8029
8030 server = NFS_SERVER(lrp->args.inode);
8031 switch (task->tk_status) {
8032 default:
8033 task->tk_status = 0;
8034 case 0:
8035 break;
8036 case -NFS4ERR_DELAY:
8037 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8038 break;
8039 rpc_restart_call_prepare(task);
8040 return;
8041 }
8042 dprintk("<-- %s\n", __func__);
8043 }
8044
8045 static void nfs4_layoutreturn_release(void *calldata)
8046 {
8047 struct nfs4_layoutreturn *lrp = calldata;
8048 struct pnfs_layout_hdr *lo = lrp->args.layout;
8049 LIST_HEAD(freeme);
8050
8051 dprintk("--> %s\n", __func__);
8052 spin_lock(&lo->plh_inode->i_lock);
8053 if (lrp->res.lrs_present)
8054 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8055 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
8056 pnfs_clear_layoutreturn_waitbit(lo);
8057 lo->plh_block_lgets--;
8058 spin_unlock(&lo->plh_inode->i_lock);
8059 pnfs_free_lseg_list(&freeme);
8060 pnfs_put_layout_hdr(lrp->args.layout);
8061 nfs_iput_and_deactive(lrp->inode);
8062 kfree(calldata);
8063 dprintk("<-- %s\n", __func__);
8064 }
8065
8066 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8067 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8068 .rpc_call_done = nfs4_layoutreturn_done,
8069 .rpc_release = nfs4_layoutreturn_release,
8070 };
8071
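/*
 * Issue LAYOUTRETURN. In the async case the inode is pinned via
 * nfs_igrab_and_active() so that nfs4_layoutreturn_release() can safely drop
 * it once the RPC completes; in the sync case the caller's reference is used
 * and the task's status is returned.
 */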
8072 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8073 {
8074 struct rpc_task *task;
8075 struct rpc_message msg = {
8076 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8077 .rpc_argp = &lrp->args,
8078 .rpc_resp = &lrp->res,
8079 .rpc_cred = lrp->cred,
8080 };
8081 struct rpc_task_setup task_setup_data = {
8082 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8083 .rpc_message = &msg,
8084 .callback_ops = &nfs4_layoutreturn_call_ops,
8085 .callback_data = lrp,
8086 };
8087 int status = 0;
8088
8089 dprintk("--> %s\n", __func__);
8090 if (!sync) {
8091 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8092 if (!lrp->inode) {
8093 nfs4_layoutreturn_release(lrp);
8094 return -EAGAIN;
8095 }
8096 task_setup_data.flags |= RPC_TASK_ASYNC;
8097 }
8098 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8099 task = rpc_run_task(&task_setup_data);
8100 if (IS_ERR(task))
8101 return PTR_ERR(task);
8102 if (sync)
8103 status = task->tk_status;
8104 trace_nfs4_layoutreturn(lrp->args.inode, status);
8105 dprintk("<-- %s status=%d\n", __func__, status);
8106 rpc_put_task(task);
8107 return status;
8108 }
8109
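/*
 * GETDEVICEINFO, requesting change/delete notifications for the device ID.
 * If the server does not agree to send exactly those notifications, the
 * device info is marked nocache so it is not retained in the device ID
 * cache.
 */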
8110 static int
8111 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8112 struct pnfs_device *pdev,
8113 struct rpc_cred *cred)
8114 {
8115 struct nfs4_getdeviceinfo_args args = {
8116 .pdev = pdev,
8117 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8118 NOTIFY_DEVICEID4_DELETE,
8119 };
8120 struct nfs4_getdeviceinfo_res res = {
8121 .pdev = pdev,
8122 };
8123 struct rpc_message msg = {
8124 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8125 .rpc_argp = &args,
8126 .rpc_resp = &res,
8127 .rpc_cred = cred,
8128 };
8129 int status;
8130
8131 dprintk("--> %s\n", __func__);
8132 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8133 if (res.notification & ~args.notify_types)
8134 dprintk("%s: unsupported notification\n", __func__);
8135 if (res.notification != args.notify_types)
8136 pdev->nocache = 1;
8137
8138 dprintk("<-- %s status=%d\n", __func__, status);
8139
8140 return status;
8141 }
8142
8143 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8144 struct pnfs_device *pdev,
8145 struct rpc_cred *cred)
8146 {
8147 struct nfs4_exception exception = { };
8148 int err;
8149
8150 do {
8151 err = nfs4_handle_exception(server,
8152 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8153 &exception);
8154 } while (exception.retry);
8155 return err;
8156 }
8157 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8158
8159 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8160 {
8161 struct nfs4_layoutcommit_data *data = calldata;
8162 struct nfs_server *server = NFS_SERVER(data->args.inode);
8163 struct nfs4_session *session = nfs4_get_session(server);
8164
8165 nfs41_setup_sequence(session,
8166 &data->args.seq_args,
8167 &data->res.seq_res,
8168 task);
8169 }
8170
8171 static void
8172 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8173 {
8174 struct nfs4_layoutcommit_data *data = calldata;
8175 struct nfs_server *server = NFS_SERVER(data->args.inode);
8176
8177 if (!nfs41_sequence_done(task, &data->res.seq_res))
8178 return;
8179
8180 switch (task->tk_status) { /* Just ignore these failures */
8181 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8182 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8183 case -NFS4ERR_BADLAYOUT: /* no layout */
8184 case -NFS4ERR_GRACE: /* loca_reclaim always false */
8185 task->tk_status = 0;
8186 case 0:
8187 break;
8188 default:
8189 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8190 rpc_restart_call_prepare(task);
8191 return;
8192 }
8193 }
8194 }
8195
8196 static void nfs4_layoutcommit_release(void *calldata)
8197 {
8198 struct nfs4_layoutcommit_data *data = calldata;
8199
8200 pnfs_cleanup_layoutcommit(data);
8201 nfs_post_op_update_inode_force_wcc(data->args.inode,
8202 data->res.fattr);
8203 put_rpccred(data->cred);
8204 nfs_iput_and_deactive(data->inode);
8205 kfree(data);
8206 }
8207
8208 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8209 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8210 .rpc_call_done = nfs4_layoutcommit_done,
8211 .rpc_release = nfs4_layoutcommit_release,
8212 };
8213
8214 int
8215 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8216 {
8217 struct rpc_message msg = {
8218 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8219 .rpc_argp = &data->args,
8220 .rpc_resp = &data->res,
8221 .rpc_cred = data->cred,
8222 };
8223 struct rpc_task_setup task_setup_data = {
8224 .task = &data->task,
8225 .rpc_client = NFS_CLIENT(data->args.inode),
8226 .rpc_message = &msg,
8227 .callback_ops = &nfs4_layoutcommit_ops,
8228 .callback_data = data,
8229 };
8230 struct rpc_task *task;
8231 int status = 0;
8232
8233 dprintk("NFS: initiating layoutcommit call. sync %d "
8234 "lbw: %llu inode %lu\n", sync,
8235 data->args.lastbytewritten,
8236 data->args.inode->i_ino);
8237
8238 if (!sync) {
8239 data->inode = nfs_igrab_and_active(data->args.inode);
8240 if (data->inode == NULL) {
8241 nfs4_layoutcommit_release(data);
8242 return -EAGAIN;
8243 }
8244 task_setup_data.flags = RPC_TASK_ASYNC;
8245 }
8246 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8247 task = rpc_run_task(&task_setup_data);
8248 if (IS_ERR(task))
8249 return PTR_ERR(task);
8250 if (sync)
8251 status = task->tk_status;
8252 trace_nfs4_layoutcommit(data->args.inode, status);
8253 dprintk("%s: status %d\n", __func__, status);
8254 rpc_put_task(task);
8255 return status;
8256 }
8257
8258 /**
8259 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8260 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8261 */
8262 static int
8263 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8264 struct nfs_fsinfo *info,
8265 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8266 {
8267 struct nfs41_secinfo_no_name_args args = {
8268 .style = SECINFO_STYLE_CURRENT_FH,
8269 };
8270 struct nfs4_secinfo_res res = {
8271 .flavors = flavors,
8272 };
8273 struct rpc_message msg = {
8274 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8275 .rpc_argp = &args,
8276 .rpc_resp = &res,
8277 };
8278 struct rpc_clnt *clnt = server->client;
8279 struct rpc_cred *cred = NULL;
8280 int status;
8281
8282 if (use_integrity) {
8283 clnt = server->nfs_client->cl_rpcclient;
8284 cred = nfs4_get_clid_cred(server->nfs_client);
8285 msg.rpc_cred = cred;
8286 }
8287
8288 dprintk("--> %s\n", __func__);
8289 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8290 &res.seq_res, 0);
8291 dprintk("<-- %s status=%d\n", __func__, status);
8292
8293 if (cred)
8294 put_rpccred(cred);
8295
8296 return status;
8297 }
8298
8299 static int
8300 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8301 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8302 {
8303 struct nfs4_exception exception = { };
8304 int err;
8305 do {
8306 /* first try using integrity protection */
8307 err = -NFS4ERR_WRONGSEC;
8308
8309 /* try to use integrity protection with machine cred */
8310 if (_nfs4_is_integrity_protected(server->nfs_client))
8311 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8312 flavors, true);
8313
8314 /*
8315 * if unable to use integrity protection, or SECINFO with
8316 * integrity protection returns NFS4ERR_WRONGSEC (which is
8317 * disallowed by spec, but exists in deployed servers) use
8318 * the current filesystem's rpc_client and the user cred.
8319 */
8320 if (err == -NFS4ERR_WRONGSEC)
8321 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8322 flavors, false);
8323
8324 switch (err) {
8325 case 0:
8326 case -NFS4ERR_WRONGSEC:
8327 case -ENOTSUPP:
8328 goto out;
8329 default:
8330 err = nfs4_handle_exception(server, err, &exception);
8331 }
8332 } while (exception.retry);
8333 out:
8334 return err;
8335 }
8336
8337 static int
8338 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8339 struct nfs_fsinfo *info)
8340 {
8341 int err;
8342 struct page *page;
8343 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8344 struct nfs4_secinfo_flavors *flavors;
8345 struct nfs4_secinfo4 *secinfo;
8346 int i;
8347
8348 page = alloc_page(GFP_KERNEL);
8349 if (!page) {
8350 err = -ENOMEM;
8351 goto out;
8352 }
8353
8354 flavors = page_address(page);
8355 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8356
8357 /*
8358 * Fall back on "guess and check" method if
8359 * the server doesn't support SECINFO_NO_NAME
8360 */
8361 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8362 err = nfs4_find_root_sec(server, fhandle, info);
8363 goto out_freepage;
8364 }
8365 if (err)
8366 goto out_freepage;
8367
8368 for (i = 0; i < flavors->num_flavors; i++) {
8369 secinfo = &flavors->flavors[i];
8370
8371 switch (secinfo->flavor) {
8372 case RPC_AUTH_NULL:
8373 case RPC_AUTH_UNIX:
8374 case RPC_AUTH_GSS:
8375 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8376 &secinfo->flavor_info);
8377 break;
8378 default:
8379 flavor = RPC_AUTH_MAXFLAVOR;
8380 break;
8381 }
8382
8383 if (!nfs_auth_info_match(&server->auth_info, flavor))
8384 flavor = RPC_AUTH_MAXFLAVOR;
8385
8386 if (flavor != RPC_AUTH_MAXFLAVOR) {
8387 err = nfs4_lookup_root_sec(server, fhandle,
8388 info, flavor);
8389 if (!err)
8390 break;
8391 }
8392 }
8393
8394 if (flavor == RPC_AUTH_MAXFLAVOR)
8395 err = -EPERM;
8396
8397 out_freepage:
8398 put_page(page);
8399 if (err == -EACCES)
8400 return -EPERM;
8401 out:
8402 return err;
8403 }
8404
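/*
 * Send a privileged TEST_STATEID for a single stateid. When SP4_MACH_CRED
 * state protection is in effect, nfs4_state_protect() switches the call to
 * the machine-credential rpc client. On RPC success the NFS4ERR status for
 * the stateid itself is returned as a negative value (NFS_OK if it is still
 * valid).
 */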
8405 static int _nfs41_test_stateid(struct nfs_server *server,
8406 nfs4_stateid *stateid,
8407 struct rpc_cred *cred)
8408 {
8409 int status;
8410 struct nfs41_test_stateid_args args = {
8411 .stateid = stateid,
8412 };
8413 struct nfs41_test_stateid_res res;
8414 struct rpc_message msg = {
8415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8416 .rpc_argp = &args,
8417 .rpc_resp = &res,
8418 .rpc_cred = cred,
8419 };
8420 struct rpc_clnt *rpc_client = server->client;
8421
8422 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8423 &rpc_client, &msg);
8424
8425 dprintk("NFS call test_stateid %p\n", stateid);
8426 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8427 nfs4_set_sequence_privileged(&args.seq_args);
8428 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8429 &args.seq_args, &res.seq_res);
8430 if (status != NFS_OK) {
8431 dprintk("NFS reply test_stateid: failed, %d\n", status);
8432 return status;
8433 }
8434 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8435 return -res.status;
8436 }
8437
8438 /**
8439 * nfs41_test_stateid - perform a TEST_STATEID operation
8440 *
8441 * @server: server / transport on which to perform the operation
8442 * @stateid: state ID to test
8443 * @cred: credential
8444 *
8445 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8446 * Otherwise a negative NFS4ERR value is returned if the operation
8447 * failed or the state ID is not currently valid.
8448 */
8449 static int nfs41_test_stateid(struct nfs_server *server,
8450 nfs4_stateid *stateid,
8451 struct rpc_cred *cred)
8452 {
8453 struct nfs4_exception exception = { };
8454 int err;
8455 do {
8456 err = _nfs41_test_stateid(server, stateid, cred);
8457 if (err != -NFS4ERR_DELAY)
8458 break;
8459 nfs4_handle_exception(server, err, &exception);
8460 } while (exception.retry);
8461 return err;
8462 }
8463
8464 struct nfs_free_stateid_data {
8465 struct nfs_server *server;
8466 struct nfs41_free_stateid_args args;
8467 struct nfs41_free_stateid_res res;
8468 };
8469
8470 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8471 {
8472 struct nfs_free_stateid_data *data = calldata;
8473 nfs41_setup_sequence(nfs4_get_session(data->server),
8474 &data->args.seq_args,
8475 &data->res.seq_res,
8476 task);
8477 }
8478
8479 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8480 {
8481 struct nfs_free_stateid_data *data = calldata;
8482
8483 nfs41_sequence_done(task, &data->res.seq_res);
8484
8485 switch (task->tk_status) {
8486 case -NFS4ERR_DELAY:
8487 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8488 rpc_restart_call_prepare(task);
8489 }
8490 }
8491
8492 static void nfs41_free_stateid_release(void *calldata)
8493 {
8494 kfree(calldata);
8495 }
8496
8497 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8498 .rpc_call_prepare = nfs41_free_stateid_prepare,
8499 .rpc_call_done = nfs41_free_stateid_done,
8500 .rpc_release = nfs41_free_stateid_release,
8501 };
8502
8503 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8504 nfs4_stateid *stateid,
8505 struct rpc_cred *cred,
8506 bool privileged)
8507 {
8508 struct rpc_message msg = {
8509 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8510 .rpc_cred = cred,
8511 };
8512 struct rpc_task_setup task_setup = {
8513 .rpc_client = server->client,
8514 .rpc_message = &msg,
8515 .callback_ops = &nfs41_free_stateid_ops,
8516 .flags = RPC_TASK_ASYNC,
8517 };
8518 struct nfs_free_stateid_data *data;
8519
8520 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8521 &task_setup.rpc_client, &msg);
8522
8523 dprintk("NFS call free_stateid %p\n", stateid);
8524 data = kmalloc(sizeof(*data), GFP_NOFS);
8525 if (!data)
8526 return ERR_PTR(-ENOMEM);
8527 data->server = server;
8528 nfs4_stateid_copy(&data->args.stateid, stateid);
8529
8530 task_setup.callback_data = data;
8531
8532 msg.rpc_argp = &data->args;
8533 msg.rpc_resp = &data->res;
8534 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8535 if (privileged)
8536 nfs4_set_sequence_privileged(&data->args.seq_args);
8537
8538 return rpc_run_task(&task_setup);
8539 }
8540
8541 /**
8542 * nfs41_free_stateid - perform a FREE_STATEID operation
8543 *
8544 * @server: server / transport on which to perform the operation
8545 * @stateid: state ID to release
8546 * @cred: credential
8547 *
8548 * Returns NFS_OK if the server freed "stateid". Otherwise a
8549 * negative NFS4ERR value is returned.
8550 */
8551 static int nfs41_free_stateid(struct nfs_server *server,
8552 nfs4_stateid *stateid,
8553 struct rpc_cred *cred)
8554 {
8555 struct rpc_task *task;
8556 int ret;
8557
8558 task = _nfs41_free_stateid(server, stateid, cred, true);
8559 if (IS_ERR(task))
8560 return PTR_ERR(task);
8561 ret = rpc_wait_for_completion_task(task);
8562 if (!ret)
8563 ret = task->tk_status;
8564 rpc_put_task(task);
8565 return ret;
8566 }
8567
8568 static void
8569 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8570 {
8571 struct rpc_task *task;
8572 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8573
8574 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8575 nfs4_free_lock_state(server, lsp);
8576 if (IS_ERR(task))
8577 return;
8578 rpc_put_task(task);
8579 }
8580
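/*
 * NFSv4.1 stateid comparison: the "other" field must match exactly, while a
 * seqid of zero on either side acts as a wild card (it designates the most
 * recent/current stateid) and so matches any seqid.
 */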
8581 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8582 const nfs4_stateid *s2)
8583 {
8584 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8585 return false;
8586
8587 if (s1->seqid == s2->seqid)
8588 return true;
8589 if (s1->seqid == 0 || s2->seqid == 0)
8590 return true;
8591
8592 return false;
8593 }
8594
8595 #endif /* CONFIG_NFS_V4_1 */
8596
8597 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8598 const nfs4_stateid *s2)
8599 {
8600 return nfs4_stateid_match(s1, s2);
8601 }
8602
8603
8604 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8605 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8606 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8607 .recover_open = nfs4_open_reclaim,
8608 .recover_lock = nfs4_lock_reclaim,
8609 .establish_clid = nfs4_init_clientid,
8610 .detect_trunking = nfs40_discover_server_trunking,
8611 };
8612
8613 #if defined(CONFIG_NFS_V4_1)
8614 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8615 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8616 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8617 .recover_open = nfs4_open_reclaim,
8618 .recover_lock = nfs4_lock_reclaim,
8619 .establish_clid = nfs41_init_clientid,
8620 .reclaim_complete = nfs41_proc_reclaim_complete,
8621 .detect_trunking = nfs41_discover_server_trunking,
8622 };
8623 #endif /* CONFIG_NFS_V4_1 */
8624
8625 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8626 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8627 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8628 .recover_open = nfs40_open_expired,
8629 .recover_lock = nfs4_lock_expired,
8630 .establish_clid = nfs4_init_clientid,
8631 };
8632
8633 #if defined(CONFIG_NFS_V4_1)
8634 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8635 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8636 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8637 .recover_open = nfs41_open_expired,
8638 .recover_lock = nfs41_lock_expired,
8639 .establish_clid = nfs41_init_clientid,
8640 };
8641 #endif /* CONFIG_NFS_V4_1 */
8642
8643 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8644 .sched_state_renewal = nfs4_proc_async_renew,
8645 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8646 .renew_lease = nfs4_proc_renew,
8647 };
8648
8649 #if defined(CONFIG_NFS_V4_1)
8650 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8651 .sched_state_renewal = nfs41_proc_async_sequence,
8652 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8653 .renew_lease = nfs4_proc_sequence,
8654 };
8655 #endif
8656
8657 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8658 .get_locations = _nfs40_proc_get_locations,
8659 .fsid_present = _nfs40_proc_fsid_present,
8660 };
8661
8662 #if defined(CONFIG_NFS_V4_1)
8663 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8664 .get_locations = _nfs41_proc_get_locations,
8665 .fsid_present = _nfs41_proc_fsid_present,
8666 };
8667 #endif /* CONFIG_NFS_V4_1 */
8668
8669 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8670 .minor_version = 0,
8671 .init_caps = NFS_CAP_READDIRPLUS
8672 | NFS_CAP_ATOMIC_OPEN
8673 | NFS_CAP_POSIX_LOCK,
8674 .init_client = nfs40_init_client,
8675 .shutdown_client = nfs40_shutdown_client,
8676 .match_stateid = nfs4_match_stateid,
8677 .find_root_sec = nfs4_find_root_sec,
8678 .free_lock_state = nfs4_release_lockowner,
8679 .alloc_seqid = nfs_alloc_seqid,
8680 .call_sync_ops = &nfs40_call_sync_ops,
8681 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8682 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8683 .state_renewal_ops = &nfs40_state_renewal_ops,
8684 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8685 };
8686
8687 #if defined(CONFIG_NFS_V4_1)
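/*
 * NFSv4.1 sessions provide exactly-once semantics, so per-owner OPEN/LOCK
 * seqids are not needed; the minor-version ->alloc_seqid hook simply returns
 * NULL.
 */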
8688 static struct nfs_seqid *
8689 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8690 {
8691 return NULL;
8692 }
8693
8694 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8695 .minor_version = 1,
8696 .init_caps = NFS_CAP_READDIRPLUS
8697 | NFS_CAP_ATOMIC_OPEN
8698 | NFS_CAP_POSIX_LOCK
8699 | NFS_CAP_STATEID_NFSV41
8700 | NFS_CAP_ATOMIC_OPEN_V1,
8701 .init_client = nfs41_init_client,
8702 .shutdown_client = nfs41_shutdown_client,
8703 .match_stateid = nfs41_match_stateid,
8704 .find_root_sec = nfs41_find_root_sec,
8705 .free_lock_state = nfs41_free_lock_state,
8706 .alloc_seqid = nfs_alloc_no_seqid,
8707 .call_sync_ops = &nfs41_call_sync_ops,
8708 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8709 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8710 .state_renewal_ops = &nfs41_state_renewal_ops,
8711 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8712 };
8713 #endif
8714
8715 #if defined(CONFIG_NFS_V4_2)
8716 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8717 .minor_version = 2,
8718 .init_caps = NFS_CAP_READDIRPLUS
8719 | NFS_CAP_ATOMIC_OPEN
8720 | NFS_CAP_POSIX_LOCK
8721 | NFS_CAP_STATEID_NFSV41
8722 | NFS_CAP_ATOMIC_OPEN_V1
8723 | NFS_CAP_ALLOCATE
8724 | NFS_CAP_DEALLOCATE
8725 | NFS_CAP_SEEK
8726 | NFS_CAP_LAYOUTSTATS
8727 | NFS_CAP_CLONE,
8728 .init_client = nfs41_init_client,
8729 .shutdown_client = nfs41_shutdown_client,
8730 .match_stateid = nfs41_match_stateid,
8731 .find_root_sec = nfs41_find_root_sec,
8732 .free_lock_state = nfs41_free_lock_state,
8733 .call_sync_ops = &nfs41_call_sync_ops,
8734 .alloc_seqid = nfs_alloc_no_seqid,
8735 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8736 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8737 .state_renewal_ops = &nfs41_state_renewal_ops,
8738 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8739 };
8740 #endif
8741
8742 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8743 [0] = &nfs_v4_0_minor_ops,
8744 #if defined(CONFIG_NFS_V4_1)
8745 [1] = &nfs_v4_1_minor_ops,
8746 #endif
8747 #if defined(CONFIG_NFS_V4_2)
8748 [2] = &nfs_v4_2_minor_ops,
8749 #endif
8750 };
8751
8752 static const struct inode_operations nfs4_dir_inode_operations = {
8753 .create = nfs_create,
8754 .lookup = nfs_lookup,
8755 .atomic_open = nfs_atomic_open,
8756 .link = nfs_link,
8757 .unlink = nfs_unlink,
8758 .symlink = nfs_symlink,
8759 .mkdir = nfs_mkdir,
8760 .rmdir = nfs_rmdir,
8761 .mknod = nfs_mknod,
8762 .rename = nfs_rename,
8763 .permission = nfs_permission,
8764 .getattr = nfs_getattr,
8765 .setattr = nfs_setattr,
8766 .getxattr = generic_getxattr,
8767 .setxattr = generic_setxattr,
8768 .listxattr = generic_listxattr,
8769 .removexattr = generic_removexattr,
8770 };
8771
8772 static const struct inode_operations nfs4_file_inode_operations = {
8773 .permission = nfs_permission,
8774 .getattr = nfs_getattr,
8775 .setattr = nfs_setattr,
8776 .getxattr = generic_getxattr,
8777 .setxattr = generic_setxattr,
8778 .listxattr = generic_listxattr,
8779 .removexattr = generic_removexattr,
8780 };
8781
8782 const struct nfs_rpc_ops nfs_v4_clientops = {
8783 .version = 4, /* protocol version */
8784 .dentry_ops = &nfs4_dentry_operations,
8785 .dir_inode_ops = &nfs4_dir_inode_operations,
8786 .file_inode_ops = &nfs4_file_inode_operations,
8787 .file_ops = &nfs4_file_operations,
8788 .getroot = nfs4_proc_get_root,
8789 .submount = nfs4_submount,
8790 .try_mount = nfs4_try_mount,
8791 .getattr = nfs4_proc_getattr,
8792 .setattr = nfs4_proc_setattr,
8793 .lookup = nfs4_proc_lookup,
8794 .access = nfs4_proc_access,
8795 .readlink = nfs4_proc_readlink,
8796 .create = nfs4_proc_create,
8797 .remove = nfs4_proc_remove,
8798 .unlink_setup = nfs4_proc_unlink_setup,
8799 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8800 .unlink_done = nfs4_proc_unlink_done,
8801 .rename_setup = nfs4_proc_rename_setup,
8802 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8803 .rename_done = nfs4_proc_rename_done,
8804 .link = nfs4_proc_link,
8805 .symlink = nfs4_proc_symlink,
8806 .mkdir = nfs4_proc_mkdir,
8807 .rmdir = nfs4_proc_remove,
8808 .readdir = nfs4_proc_readdir,
8809 .mknod = nfs4_proc_mknod,
8810 .statfs = nfs4_proc_statfs,
8811 .fsinfo = nfs4_proc_fsinfo,
8812 .pathconf = nfs4_proc_pathconf,
8813 .set_capabilities = nfs4_server_capabilities,
8814 .decode_dirent = nfs4_decode_dirent,
8815 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8816 .read_setup = nfs4_proc_read_setup,
8817 .read_done = nfs4_read_done,
8818 .write_setup = nfs4_proc_write_setup,
8819 .write_done = nfs4_write_done,
8820 .commit_setup = nfs4_proc_commit_setup,
8821 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8822 .commit_done = nfs4_commit_done,
8823 .lock = nfs4_proc_lock,
8824 .clear_acl_cache = nfs4_zap_acl_attr,
8825 .close_context = nfs4_close_context,
8826 .open_context = nfs4_atomic_open,
8827 .have_delegation = nfs4_have_delegation,
8828 .return_delegation = nfs4_inode_return_delegation,
8829 .alloc_client = nfs4_alloc_client,
8830 .init_client = nfs4_init_client,
8831 .free_client = nfs4_free_client,
8832 .create_server = nfs4_create_server,
8833 .clone_server = nfs_clone_server,
8834 };
8835
8836 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8837 .prefix = XATTR_NAME_NFSV4_ACL,
8838 .list = nfs4_xattr_list_nfs4_acl,
8839 .get = nfs4_xattr_get_nfs4_acl,
8840 .set = nfs4_xattr_set_nfs4_acl,
8841 };
8842
8843 const struct xattr_handler *nfs4_xattr_handlers[] = {
8844 &nfs4_xattr_nfs4_acl_handler,
8845 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8846 &nfs4_xattr_nfs4_label_handler,
8847 #endif
8848 NULL
8849 };
8850
8851 /*
8852 * Local variables:
8853 * c-basic-offset: 8
8854 * End:
8855 */