]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blob - fs/nfs/nfs4proc.c
cfg80211: fix proto in ieee80211_data_to_8023 for frames without LLC header
[mirror_ubuntu-focal-kernel.git] / fs / nfs / nfs4proc.c
1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state, struct nfs4_label *ilabel,
87 struct nfs4_label *olabel);
88 #ifdef CONFIG_NFS_V4_1
89 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
90 struct rpc_cred *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
92 struct rpc_cred *);
93 #endif
94
95 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
96 static inline struct nfs4_label *
97 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
98 struct iattr *sattr, struct nfs4_label *label)
99 {
100 int err;
101
102 if (label == NULL)
103 return NULL;
104
105 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
106 return NULL;
107
108 err = security_dentry_init_security(dentry, sattr->ia_mode,
109 &dentry->d_name, (void **)&label->label, &label->len);
110 if (err == 0)
111 return label;
112
113 return NULL;
114 }
115 static inline void
116 nfs4_label_release_security(struct nfs4_label *label)
117 {
118 if (label)
119 security_release_secctx(label->label, label->len);
120 }
121 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
122 {
123 if (label)
124 return server->attr_bitmask;
125
126 return server->attr_bitmask_nl;
127 }
128 #else
129 static inline struct nfs4_label *
130 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
131 struct iattr *sattr, struct nfs4_label *l)
132 { return NULL; }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 { return; }
136 static inline u32 *
137 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 { return server->attr_bitmask; }
139 #endif
140
141 /* Prevent leaks of NFSv4 errors into userland */
142 static int nfs4_map_errors(int err)
143 {
144 if (err >= -1000)
145 return err;
146 switch (err) {
147 case -NFS4ERR_RESOURCE:
148 case -NFS4ERR_LAYOUTTRYLATER:
149 case -NFS4ERR_RECALLCONFLICT:
150 return -EREMOTEIO;
151 case -NFS4ERR_WRONGSEC:
152 case -NFS4ERR_WRONG_CRED:
153 return -EPERM;
154 case -NFS4ERR_BADOWNER:
155 case -NFS4ERR_BADNAME:
156 return -EINVAL;
157 case -NFS4ERR_SHARE_DENIED:
158 return -EACCES;
159 case -NFS4ERR_MINOR_VERS_MISMATCH:
160 return -EPROTONOSUPPORT;
161 case -NFS4ERR_FILE_OPEN:
162 return -EBUSY;
163 default:
164 dprintk("%s could not handle NFSv4 error %d\n",
165 __func__, -err);
166 break;
167 }
168 return -EIO;
169 }
170
171 /*
172 * This is our standard bitmap for GETATTR requests.
173 */
174 const u32 nfs4_fattr_bitmap[3] = {
175 FATTR4_WORD0_TYPE
176 | FATTR4_WORD0_CHANGE
177 | FATTR4_WORD0_SIZE
178 | FATTR4_WORD0_FSID
179 | FATTR4_WORD0_FILEID,
180 FATTR4_WORD1_MODE
181 | FATTR4_WORD1_NUMLINKS
182 | FATTR4_WORD1_OWNER
183 | FATTR4_WORD1_OWNER_GROUP
184 | FATTR4_WORD1_RAWDEV
185 | FATTR4_WORD1_SPACE_USED
186 | FATTR4_WORD1_TIME_ACCESS
187 | FATTR4_WORD1_TIME_METADATA
188 | FATTR4_WORD1_TIME_MODIFY
189 | FATTR4_WORD1_MOUNTED_ON_FILEID,
190 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
191 FATTR4_WORD2_SECURITY_LABEL
192 #endif
193 };
194
195 static const u32 nfs4_pnfs_open_bitmap[3] = {
196 FATTR4_WORD0_TYPE
197 | FATTR4_WORD0_CHANGE
198 | FATTR4_WORD0_SIZE
199 | FATTR4_WORD0_FSID
200 | FATTR4_WORD0_FILEID,
201 FATTR4_WORD1_MODE
202 | FATTR4_WORD1_NUMLINKS
203 | FATTR4_WORD1_OWNER
204 | FATTR4_WORD1_OWNER_GROUP
205 | FATTR4_WORD1_RAWDEV
206 | FATTR4_WORD1_SPACE_USED
207 | FATTR4_WORD1_TIME_ACCESS
208 | FATTR4_WORD1_TIME_METADATA
209 | FATTR4_WORD1_TIME_MODIFY,
210 FATTR4_WORD2_MDSTHRESHOLD
211 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
212 | FATTR4_WORD2_SECURITY_LABEL
213 #endif
214 };
215
216 static const u32 nfs4_open_noattr_bitmap[3] = {
217 FATTR4_WORD0_TYPE
218 | FATTR4_WORD0_CHANGE
219 | FATTR4_WORD0_FILEID,
220 };
221
222 const u32 nfs4_statfs_bitmap[3] = {
223 FATTR4_WORD0_FILES_AVAIL
224 | FATTR4_WORD0_FILES_FREE
225 | FATTR4_WORD0_FILES_TOTAL,
226 FATTR4_WORD1_SPACE_AVAIL
227 | FATTR4_WORD1_SPACE_FREE
228 | FATTR4_WORD1_SPACE_TOTAL
229 };
230
231 const u32 nfs4_pathconf_bitmap[3] = {
232 FATTR4_WORD0_MAXLINK
233 | FATTR4_WORD0_MAXNAME,
234 0
235 };
236
237 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
238 | FATTR4_WORD0_MAXREAD
239 | FATTR4_WORD0_MAXWRITE
240 | FATTR4_WORD0_LEASE_TIME,
241 FATTR4_WORD1_TIME_DELTA
242 | FATTR4_WORD1_FS_LAYOUT_TYPES,
243 FATTR4_WORD2_LAYOUT_BLKSIZE
244 | FATTR4_WORD2_CLONE_BLKSIZE
245 };
246
247 const u32 nfs4_fs_locations_bitmap[3] = {
248 FATTR4_WORD0_TYPE
249 | FATTR4_WORD0_CHANGE
250 | FATTR4_WORD0_SIZE
251 | FATTR4_WORD0_FSID
252 | FATTR4_WORD0_FILEID
253 | FATTR4_WORD0_FS_LOCATIONS,
254 FATTR4_WORD1_MODE
255 | FATTR4_WORD1_NUMLINKS
256 | FATTR4_WORD1_OWNER
257 | FATTR4_WORD1_OWNER_GROUP
258 | FATTR4_WORD1_RAWDEV
259 | FATTR4_WORD1_SPACE_USED
260 | FATTR4_WORD1_TIME_ACCESS
261 | FATTR4_WORD1_TIME_METADATA
262 | FATTR4_WORD1_TIME_MODIFY
263 | FATTR4_WORD1_MOUNTED_ON_FILEID,
264 };
265
266 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
267 struct nfs4_readdir_arg *readdir)
268 {
269 __be32 *start, *p;
270
271 if (cookie > 2) {
272 readdir->cookie = cookie;
273 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
274 return;
275 }
276
277 readdir->cookie = 0;
278 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
279 if (cookie == 2)
280 return;
281
282 /*
283 * NFSv4 servers do not return entries for '.' and '..'
284 * Therefore, we fake these entries here. We let '.'
285 * have cookie 0 and '..' have cookie 1. Note that
286 * when talking to the server, we always send cookie 0
287 * instead of 1 or 2.
288 */
289 start = p = kmap_atomic(*readdir->pages);
290
291 if (cookie == 0) {
292 *p++ = xdr_one; /* next */
293 *p++ = xdr_zero; /* cookie, first word */
294 *p++ = xdr_one; /* cookie, second word */
295 *p++ = xdr_one; /* entry len */
296 memcpy(p, ".\0\0\0", 4); /* entry */
297 p++;
298 *p++ = xdr_one; /* bitmap length */
299 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
300 *p++ = htonl(8); /* attribute buffer length */
301 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
302 }
303
304 *p++ = xdr_one; /* next */
305 *p++ = xdr_zero; /* cookie, first word */
306 *p++ = xdr_two; /* cookie, second word */
307 *p++ = xdr_two; /* entry len */
308 memcpy(p, "..\0\0", 4); /* entry */
309 p++;
310 *p++ = xdr_one; /* bitmap length */
311 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
312 *p++ = htonl(8); /* attribute buffer length */
313 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
314
315 readdir->pgbase = (char *)p - (char *)start;
316 readdir->count -= readdir->pgbase;
317 kunmap_atomic(start);
318 }
319
320 static long nfs4_update_delay(long *timeout)
321 {
322 long ret;
323 if (!timeout)
324 return NFS4_POLL_RETRY_MAX;
325 if (*timeout <= 0)
326 *timeout = NFS4_POLL_RETRY_MIN;
327 if (*timeout > NFS4_POLL_RETRY_MAX)
328 *timeout = NFS4_POLL_RETRY_MAX;
329 ret = *timeout;
330 *timeout <<= 1;
331 return ret;
332 }
333
334 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
335 {
336 int res = 0;
337
338 might_sleep();
339
340 freezable_schedule_timeout_killable_unsafe(
341 nfs4_update_delay(timeout));
342 if (fatal_signal_pending(current))
343 res = -ERESTARTSYS;
344 return res;
345 }
346
347 /* This is the error handling routine for processes that are allowed
348 * to sleep.
349 */
350 static int nfs4_do_handle_exception(struct nfs_server *server,
351 int errorcode, struct nfs4_exception *exception)
352 {
353 struct nfs_client *clp = server->nfs_client;
354 struct nfs4_state *state = exception->state;
355 struct inode *inode = exception->inode;
356 int ret = errorcode;
357
358 exception->delay = 0;
359 exception->recovering = 0;
360 exception->retry = 0;
361 switch(errorcode) {
362 case 0:
363 return 0;
364 case -NFS4ERR_OPENMODE:
365 case -NFS4ERR_DELEG_REVOKED:
366 case -NFS4ERR_ADMIN_REVOKED:
367 case -NFS4ERR_BAD_STATEID:
368 if (inode && nfs_async_inode_return_delegation(inode,
369 NULL) == 0)
370 goto wait_on_recovery;
371 if (state == NULL)
372 break;
373 ret = nfs4_schedule_stateid_recovery(server, state);
374 if (ret < 0)
375 break;
376 goto wait_on_recovery;
377 case -NFS4ERR_EXPIRED:
378 if (state != NULL) {
379 ret = nfs4_schedule_stateid_recovery(server, state);
380 if (ret < 0)
381 break;
382 }
383 case -NFS4ERR_STALE_STATEID:
384 case -NFS4ERR_STALE_CLIENTID:
385 nfs4_schedule_lease_recovery(clp);
386 goto wait_on_recovery;
387 case -NFS4ERR_MOVED:
388 ret = nfs4_schedule_migration_recovery(server);
389 if (ret < 0)
390 break;
391 goto wait_on_recovery;
392 case -NFS4ERR_LEASE_MOVED:
393 nfs4_schedule_lease_moved_recovery(clp);
394 goto wait_on_recovery;
395 #if defined(CONFIG_NFS_V4_1)
396 case -NFS4ERR_BADSESSION:
397 case -NFS4ERR_BADSLOT:
398 case -NFS4ERR_BAD_HIGH_SLOT:
399 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
400 case -NFS4ERR_DEADSESSION:
401 case -NFS4ERR_SEQ_FALSE_RETRY:
402 case -NFS4ERR_SEQ_MISORDERED:
403 dprintk("%s ERROR: %d Reset session\n", __func__,
404 errorcode);
405 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
406 goto wait_on_recovery;
407 #endif /* defined(CONFIG_NFS_V4_1) */
408 case -NFS4ERR_FILE_OPEN:
409 if (exception->timeout > HZ) {
410 /* We have retried a decent amount, time to
411 * fail
412 */
413 ret = -EBUSY;
414 break;
415 }
416 case -NFS4ERR_DELAY:
417 nfs_inc_server_stats(server, NFSIOS_DELAY);
418 case -NFS4ERR_GRACE:
419 exception->delay = 1;
420 return 0;
421
422 case -NFS4ERR_RETRY_UNCACHED_REP:
423 case -NFS4ERR_OLD_STATEID:
424 exception->retry = 1;
425 break;
426 case -NFS4ERR_BADOWNER:
427 /* The following works around a Linux server bug! */
428 case -NFS4ERR_BADNAME:
429 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
430 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
431 exception->retry = 1;
432 printk(KERN_WARNING "NFS: v4 server %s "
433 "does not accept raw "
434 "uid/gids. "
435 "Reenabling the idmapper.\n",
436 server->nfs_client->cl_hostname);
437 }
438 }
439 /* We failed to handle the error */
440 return nfs4_map_errors(ret);
441 wait_on_recovery:
442 exception->recovering = 1;
443 return 0;
444 }
445
446 /* This is the error handling routine for processes that are allowed
447 * to sleep.
448 */
449 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
450 {
451 struct nfs_client *clp = server->nfs_client;
452 int ret;
453
454 ret = nfs4_do_handle_exception(server, errorcode, exception);
455 if (exception->delay) {
456 ret = nfs4_delay(server->client, &exception->timeout);
457 goto out_retry;
458 }
459 if (exception->recovering) {
460 ret = nfs4_wait_clnt_recover(clp);
461 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
462 return -EIO;
463 goto out_retry;
464 }
465 return ret;
466 out_retry:
467 if (ret == 0)
468 exception->retry = 1;
469 return ret;
470 }
471
472 static int
473 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
474 int errorcode, struct nfs4_exception *exception)
475 {
476 struct nfs_client *clp = server->nfs_client;
477 int ret;
478
479 ret = nfs4_do_handle_exception(server, errorcode, exception);
480 if (exception->delay) {
481 rpc_delay(task, nfs4_update_delay(&exception->timeout));
482 goto out_retry;
483 }
484 if (exception->recovering) {
485 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
486 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
487 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
488 goto out_retry;
489 }
490 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
491 ret = -EIO;
492 return ret;
493 out_retry:
494 if (ret == 0)
495 exception->retry = 1;
496 return ret;
497 }
498
499 static int
500 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
501 struct nfs4_state *state, long *timeout)
502 {
503 struct nfs4_exception exception = {
504 .state = state,
505 };
506
507 if (task->tk_status >= 0)
508 return 0;
509 if (timeout)
510 exception.timeout = *timeout;
511 task->tk_status = nfs4_async_handle_exception(task, server,
512 task->tk_status,
513 &exception);
514 if (exception.delay && timeout)
515 *timeout = exception.timeout;
516 if (exception.retry)
517 return -EAGAIN;
518 return 0;
519 }
520
521 /*
522 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
523 * or 'false' otherwise.
524 */
525 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
526 {
527 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
528
529 if (flavor == RPC_AUTH_GSS_KRB5I ||
530 flavor == RPC_AUTH_GSS_KRB5P)
531 return true;
532
533 return false;
534 }
535
536 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
537 {
538 spin_lock(&clp->cl_lock);
539 if (time_before(clp->cl_last_renewal,timestamp))
540 clp->cl_last_renewal = timestamp;
541 spin_unlock(&clp->cl_lock);
542 }
543
544 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
545 {
546 struct nfs_client *clp = server->nfs_client;
547
548 if (!nfs4_has_session(clp))
549 do_renew_lease(clp, timestamp);
550 }
551
552 struct nfs4_call_sync_data {
553 const struct nfs_server *seq_server;
554 struct nfs4_sequence_args *seq_args;
555 struct nfs4_sequence_res *seq_res;
556 };
557
558 void nfs4_init_sequence(struct nfs4_sequence_args *args,
559 struct nfs4_sequence_res *res, int cache_reply)
560 {
561 args->sa_slot = NULL;
562 args->sa_cache_this = cache_reply;
563 args->sa_privileged = 0;
564
565 res->sr_slot = NULL;
566 }
567
568 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
569 {
570 args->sa_privileged = 1;
571 }
572
573 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
574 struct nfs4_sequence_args *args,
575 struct nfs4_sequence_res *res,
576 struct rpc_task *task)
577 {
578 struct nfs4_slot *slot;
579
580 /* slot already allocated? */
581 if (res->sr_slot != NULL)
582 goto out_start;
583
584 spin_lock(&tbl->slot_tbl_lock);
585 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
586 goto out_sleep;
587
588 slot = nfs4_alloc_slot(tbl);
589 if (IS_ERR(slot)) {
590 if (slot == ERR_PTR(-ENOMEM))
591 task->tk_timeout = HZ >> 2;
592 goto out_sleep;
593 }
594 spin_unlock(&tbl->slot_tbl_lock);
595
596 args->sa_slot = slot;
597 res->sr_slot = slot;
598
599 out_start:
600 rpc_call_start(task);
601 return 0;
602
603 out_sleep:
604 if (args->sa_privileged)
605 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
606 NULL, RPC_PRIORITY_PRIVILEGED);
607 else
608 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
609 spin_unlock(&tbl->slot_tbl_lock);
610 return -EAGAIN;
611 }
612 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
613
614 static int nfs40_sequence_done(struct rpc_task *task,
615 struct nfs4_sequence_res *res)
616 {
617 struct nfs4_slot *slot = res->sr_slot;
618 struct nfs4_slot_table *tbl;
619
620 if (slot == NULL)
621 goto out;
622
623 tbl = slot->table;
624 spin_lock(&tbl->slot_tbl_lock);
625 if (!nfs41_wake_and_assign_slot(tbl, slot))
626 nfs4_free_slot(tbl, slot);
627 spin_unlock(&tbl->slot_tbl_lock);
628
629 res->sr_slot = NULL;
630 out:
631 return 1;
632 }
633
634 #if defined(CONFIG_NFS_V4_1)
635
636 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
637 {
638 struct nfs4_session *session;
639 struct nfs4_slot_table *tbl;
640 struct nfs4_slot *slot = res->sr_slot;
641 bool send_new_highest_used_slotid = false;
642
643 tbl = slot->table;
644 session = tbl->session;
645
646 spin_lock(&tbl->slot_tbl_lock);
647 /* Be nice to the server: try to ensure that the last transmitted
648 * value for highest_user_slotid <= target_highest_slotid
649 */
650 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
651 send_new_highest_used_slotid = true;
652
653 if (nfs41_wake_and_assign_slot(tbl, slot)) {
654 send_new_highest_used_slotid = false;
655 goto out_unlock;
656 }
657 nfs4_free_slot(tbl, slot);
658
659 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
660 send_new_highest_used_slotid = false;
661 out_unlock:
662 spin_unlock(&tbl->slot_tbl_lock);
663 res->sr_slot = NULL;
664 if (send_new_highest_used_slotid)
665 nfs41_notify_server(session->clp);
666 }
667
668 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
669 {
670 struct nfs4_session *session;
671 struct nfs4_slot *slot = res->sr_slot;
672 struct nfs_client *clp;
673 bool interrupted = false;
674 int ret = 1;
675
676 if (slot == NULL)
677 goto out_noaction;
678 /* don't increment the sequence number if the task wasn't sent */
679 if (!RPC_WAS_SENT(task))
680 goto out;
681
682 session = slot->table->session;
683
684 if (slot->interrupted) {
685 slot->interrupted = 0;
686 interrupted = true;
687 }
688
689 trace_nfs4_sequence_done(session, res);
690 /* Check the SEQUENCE operation status */
691 switch (res->sr_status) {
692 case 0:
693 /* Update the slot's sequence and clientid lease timer */
694 ++slot->seq_nr;
695 clp = session->clp;
696 do_renew_lease(clp, res->sr_timestamp);
697 /* Check sequence flags */
698 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
699 nfs41_update_target_slotid(slot->table, slot, res);
700 break;
701 case 1:
702 /*
703 * sr_status remains 1 if an RPC level error occurred.
704 * The server may or may not have processed the sequence
705 * operation..
706 * Mark the slot as having hosted an interrupted RPC call.
707 */
708 slot->interrupted = 1;
709 goto out;
710 case -NFS4ERR_DELAY:
711 /* The server detected a resend of the RPC call and
712 * returned NFS4ERR_DELAY as per Section 2.10.6.2
713 * of RFC5661.
714 */
715 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
716 __func__,
717 slot->slot_nr,
718 slot->seq_nr);
719 goto out_retry;
720 case -NFS4ERR_BADSLOT:
721 /*
722 * The slot id we used was probably retired. Try again
723 * using a different slot id.
724 */
725 goto retry_nowait;
726 case -NFS4ERR_SEQ_MISORDERED:
727 /*
728 * Was the last operation on this sequence interrupted?
729 * If so, retry after bumping the sequence number.
730 */
731 if (interrupted) {
732 ++slot->seq_nr;
733 goto retry_nowait;
734 }
735 /*
736 * Could this slot have been previously retired?
737 * If so, then the server may be expecting seq_nr = 1!
738 */
739 if (slot->seq_nr != 1) {
740 slot->seq_nr = 1;
741 goto retry_nowait;
742 }
743 break;
744 case -NFS4ERR_SEQ_FALSE_RETRY:
745 ++slot->seq_nr;
746 goto retry_nowait;
747 default:
748 /* Just update the slot sequence no. */
749 ++slot->seq_nr;
750 }
751 out:
752 /* The session may be reset by one of the error handlers. */
753 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
754 nfs41_sequence_free_slot(res);
755 out_noaction:
756 return ret;
757 retry_nowait:
758 if (rpc_restart_call_prepare(task)) {
759 task->tk_status = 0;
760 ret = 0;
761 }
762 goto out;
763 out_retry:
764 if (!rpc_restart_call(task))
765 goto out;
766 rpc_delay(task, NFS4_POLL_RETRY_MAX);
767 return 0;
768 }
769 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
770
771 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
772 {
773 if (res->sr_slot == NULL)
774 return 1;
775 if (!res->sr_slot->table->session)
776 return nfs40_sequence_done(task, res);
777 return nfs41_sequence_done(task, res);
778 }
779 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
780
781 int nfs41_setup_sequence(struct nfs4_session *session,
782 struct nfs4_sequence_args *args,
783 struct nfs4_sequence_res *res,
784 struct rpc_task *task)
785 {
786 struct nfs4_slot *slot;
787 struct nfs4_slot_table *tbl;
788
789 dprintk("--> %s\n", __func__);
790 /* slot already allocated? */
791 if (res->sr_slot != NULL)
792 goto out_success;
793
794 tbl = &session->fc_slot_table;
795
796 task->tk_timeout = 0;
797
798 spin_lock(&tbl->slot_tbl_lock);
799 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
800 !args->sa_privileged) {
801 /* The state manager will wait until the slot table is empty */
802 dprintk("%s session is draining\n", __func__);
803 goto out_sleep;
804 }
805
806 slot = nfs4_alloc_slot(tbl);
807 if (IS_ERR(slot)) {
808 /* If out of memory, try again in 1/4 second */
809 if (slot == ERR_PTR(-ENOMEM))
810 task->tk_timeout = HZ >> 2;
811 dprintk("<-- %s: no free slots\n", __func__);
812 goto out_sleep;
813 }
814 spin_unlock(&tbl->slot_tbl_lock);
815
816 args->sa_slot = slot;
817
818 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
819 slot->slot_nr, slot->seq_nr);
820
821 res->sr_slot = slot;
822 res->sr_timestamp = jiffies;
823 res->sr_status_flags = 0;
824 /*
825 * sr_status is only set in decode_sequence, and so will remain
826 * set to 1 if an rpc level failure occurs.
827 */
828 res->sr_status = 1;
829 trace_nfs4_setup_sequence(session, args);
830 out_success:
831 rpc_call_start(task);
832 return 0;
833 out_sleep:
834 /* Privileged tasks are queued with top priority */
835 if (args->sa_privileged)
836 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
837 NULL, RPC_PRIORITY_PRIVILEGED);
838 else
839 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
840 spin_unlock(&tbl->slot_tbl_lock);
841 return -EAGAIN;
842 }
843 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
844
845 static int nfs4_setup_sequence(const struct nfs_server *server,
846 struct nfs4_sequence_args *args,
847 struct nfs4_sequence_res *res,
848 struct rpc_task *task)
849 {
850 struct nfs4_session *session = nfs4_get_session(server);
851 int ret = 0;
852
853 if (!session)
854 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
855 args, res, task);
856
857 dprintk("--> %s clp %p session %p sr_slot %u\n",
858 __func__, session->clp, session, res->sr_slot ?
859 res->sr_slot->slot_nr : NFS4_NO_SLOT);
860
861 ret = nfs41_setup_sequence(session, args, res, task);
862
863 dprintk("<-- %s status=%d\n", __func__, ret);
864 return ret;
865 }
866
867 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
868 {
869 struct nfs4_call_sync_data *data = calldata;
870 struct nfs4_session *session = nfs4_get_session(data->seq_server);
871
872 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
873
874 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
875 }
876
877 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
878 {
879 struct nfs4_call_sync_data *data = calldata;
880
881 nfs41_sequence_done(task, data->seq_res);
882 }
883
884 static const struct rpc_call_ops nfs41_call_sync_ops = {
885 .rpc_call_prepare = nfs41_call_sync_prepare,
886 .rpc_call_done = nfs41_call_sync_done,
887 };
888
889 #else /* !CONFIG_NFS_V4_1 */
890
891 static int nfs4_setup_sequence(const struct nfs_server *server,
892 struct nfs4_sequence_args *args,
893 struct nfs4_sequence_res *res,
894 struct rpc_task *task)
895 {
896 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
897 args, res, task);
898 }
899
900 int nfs4_sequence_done(struct rpc_task *task,
901 struct nfs4_sequence_res *res)
902 {
903 return nfs40_sequence_done(task, res);
904 }
905 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
906
907 #endif /* !CONFIG_NFS_V4_1 */
908
909 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
910 {
911 struct nfs4_call_sync_data *data = calldata;
912 nfs4_setup_sequence(data->seq_server,
913 data->seq_args, data->seq_res, task);
914 }
915
916 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
917 {
918 struct nfs4_call_sync_data *data = calldata;
919 nfs4_sequence_done(task, data->seq_res);
920 }
921
922 static const struct rpc_call_ops nfs40_call_sync_ops = {
923 .rpc_call_prepare = nfs40_call_sync_prepare,
924 .rpc_call_done = nfs40_call_sync_done,
925 };
926
927 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
928 struct nfs_server *server,
929 struct rpc_message *msg,
930 struct nfs4_sequence_args *args,
931 struct nfs4_sequence_res *res)
932 {
933 int ret;
934 struct rpc_task *task;
935 struct nfs_client *clp = server->nfs_client;
936 struct nfs4_call_sync_data data = {
937 .seq_server = server,
938 .seq_args = args,
939 .seq_res = res,
940 };
941 struct rpc_task_setup task_setup = {
942 .rpc_client = clnt,
943 .rpc_message = msg,
944 .callback_ops = clp->cl_mvops->call_sync_ops,
945 .callback_data = &data
946 };
947
948 task = rpc_run_task(&task_setup);
949 if (IS_ERR(task))
950 ret = PTR_ERR(task);
951 else {
952 ret = task->tk_status;
953 rpc_put_task(task);
954 }
955 return ret;
956 }
957
958 int nfs4_call_sync(struct rpc_clnt *clnt,
959 struct nfs_server *server,
960 struct rpc_message *msg,
961 struct nfs4_sequence_args *args,
962 struct nfs4_sequence_res *res,
963 int cache_reply)
964 {
965 nfs4_init_sequence(args, res, cache_reply);
966 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
967 }
968
969 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
970 {
971 struct nfs_inode *nfsi = NFS_I(dir);
972
973 spin_lock(&dir->i_lock);
974 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
975 if (!cinfo->atomic || cinfo->before != dir->i_version)
976 nfs_force_lookup_revalidate(dir);
977 dir->i_version = cinfo->after;
978 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
979 nfs_fscache_invalidate(dir);
980 spin_unlock(&dir->i_lock);
981 }
982
983 struct nfs4_opendata {
984 struct kref kref;
985 struct nfs_openargs o_arg;
986 struct nfs_openres o_res;
987 struct nfs_open_confirmargs c_arg;
988 struct nfs_open_confirmres c_res;
989 struct nfs4_string owner_name;
990 struct nfs4_string group_name;
991 struct nfs4_label *a_label;
992 struct nfs_fattr f_attr;
993 struct nfs4_label *f_label;
994 struct dentry *dir;
995 struct dentry *dentry;
996 struct nfs4_state_owner *owner;
997 struct nfs4_state *state;
998 struct iattr attrs;
999 unsigned long timestamp;
1000 unsigned int rpc_done : 1;
1001 unsigned int file_created : 1;
1002 unsigned int is_recover : 1;
1003 int rpc_status;
1004 int cancelled;
1005 };
1006
1007 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1008 int err, struct nfs4_exception *exception)
1009 {
1010 if (err != -EINVAL)
1011 return false;
1012 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1013 return false;
1014 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1015 exception->retry = 1;
1016 return true;
1017 }
1018
1019 static u32
1020 nfs4_map_atomic_open_share(struct nfs_server *server,
1021 fmode_t fmode, int openflags)
1022 {
1023 u32 res = 0;
1024
1025 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1026 case FMODE_READ:
1027 res = NFS4_SHARE_ACCESS_READ;
1028 break;
1029 case FMODE_WRITE:
1030 res = NFS4_SHARE_ACCESS_WRITE;
1031 break;
1032 case FMODE_READ|FMODE_WRITE:
1033 res = NFS4_SHARE_ACCESS_BOTH;
1034 }
1035 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1036 goto out;
1037 /* Want no delegation if we're using O_DIRECT */
1038 if (openflags & O_DIRECT)
1039 res |= NFS4_SHARE_WANT_NO_DELEG;
1040 out:
1041 return res;
1042 }
1043
1044 static enum open_claim_type4
1045 nfs4_map_atomic_open_claim(struct nfs_server *server,
1046 enum open_claim_type4 claim)
1047 {
1048 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1049 return claim;
1050 switch (claim) {
1051 default:
1052 return claim;
1053 case NFS4_OPEN_CLAIM_FH:
1054 return NFS4_OPEN_CLAIM_NULL;
1055 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1056 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1057 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1058 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1059 }
1060 }
1061
1062 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1063 {
1064 p->o_res.f_attr = &p->f_attr;
1065 p->o_res.f_label = p->f_label;
1066 p->o_res.seqid = p->o_arg.seqid;
1067 p->c_res.seqid = p->c_arg.seqid;
1068 p->o_res.server = p->o_arg.server;
1069 p->o_res.access_request = p->o_arg.access;
1070 nfs_fattr_init(&p->f_attr);
1071 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1072 }
1073
1074 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1075 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1076 const struct iattr *attrs,
1077 struct nfs4_label *label,
1078 enum open_claim_type4 claim,
1079 gfp_t gfp_mask)
1080 {
1081 struct dentry *parent = dget_parent(dentry);
1082 struct inode *dir = d_inode(parent);
1083 struct nfs_server *server = NFS_SERVER(dir);
1084 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1085 struct nfs4_opendata *p;
1086
1087 p = kzalloc(sizeof(*p), gfp_mask);
1088 if (p == NULL)
1089 goto err;
1090
1091 p->f_label = nfs4_label_alloc(server, gfp_mask);
1092 if (IS_ERR(p->f_label))
1093 goto err_free_p;
1094
1095 p->a_label = nfs4_label_alloc(server, gfp_mask);
1096 if (IS_ERR(p->a_label))
1097 goto err_free_f;
1098
1099 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1100 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1101 if (IS_ERR(p->o_arg.seqid))
1102 goto err_free_label;
1103 nfs_sb_active(dentry->d_sb);
1104 p->dentry = dget(dentry);
1105 p->dir = parent;
1106 p->owner = sp;
1107 atomic_inc(&sp->so_count);
1108 p->o_arg.open_flags = flags;
1109 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1110 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1111 fmode, flags);
1112 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1113 * will return permission denied for all bits until close */
1114 if (!(flags & O_EXCL)) {
1115 /* ask server to check for all possible rights as results
1116 * are cached */
1117 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1118 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1119 }
1120 p->o_arg.clientid = server->nfs_client->cl_clientid;
1121 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1122 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1123 p->o_arg.name = &dentry->d_name;
1124 p->o_arg.server = server;
1125 p->o_arg.bitmask = nfs4_bitmask(server, label);
1126 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1127 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1128 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1129 switch (p->o_arg.claim) {
1130 case NFS4_OPEN_CLAIM_NULL:
1131 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1132 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1133 p->o_arg.fh = NFS_FH(dir);
1134 break;
1135 case NFS4_OPEN_CLAIM_PREVIOUS:
1136 case NFS4_OPEN_CLAIM_FH:
1137 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1138 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1139 p->o_arg.fh = NFS_FH(d_inode(dentry));
1140 }
1141 if (attrs != NULL && attrs->ia_valid != 0) {
1142 __u32 verf[2];
1143
1144 p->o_arg.u.attrs = &p->attrs;
1145 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1146
1147 verf[0] = jiffies;
1148 verf[1] = current->pid;
1149 memcpy(p->o_arg.u.verifier.data, verf,
1150 sizeof(p->o_arg.u.verifier.data));
1151 }
1152 p->c_arg.fh = &p->o_res.fh;
1153 p->c_arg.stateid = &p->o_res.stateid;
1154 p->c_arg.seqid = p->o_arg.seqid;
1155 nfs4_init_opendata_res(p);
1156 kref_init(&p->kref);
1157 return p;
1158
1159 err_free_label:
1160 nfs4_label_free(p->a_label);
1161 err_free_f:
1162 nfs4_label_free(p->f_label);
1163 err_free_p:
1164 kfree(p);
1165 err:
1166 dput(parent);
1167 return NULL;
1168 }
1169
1170 static void nfs4_opendata_free(struct kref *kref)
1171 {
1172 struct nfs4_opendata *p = container_of(kref,
1173 struct nfs4_opendata, kref);
1174 struct super_block *sb = p->dentry->d_sb;
1175
1176 nfs_free_seqid(p->o_arg.seqid);
1177 if (p->state != NULL)
1178 nfs4_put_open_state(p->state);
1179 nfs4_put_state_owner(p->owner);
1180
1181 nfs4_label_free(p->a_label);
1182 nfs4_label_free(p->f_label);
1183
1184 dput(p->dir);
1185 dput(p->dentry);
1186 nfs_sb_deactive(sb);
1187 nfs_fattr_free_names(&p->f_attr);
1188 kfree(p->f_attr.mdsthreshold);
1189 kfree(p);
1190 }
1191
1192 static void nfs4_opendata_put(struct nfs4_opendata *p)
1193 {
1194 if (p != NULL)
1195 kref_put(&p->kref, nfs4_opendata_free);
1196 }
1197
1198 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1199 {
1200 int ret;
1201
1202 ret = rpc_wait_for_completion_task(task);
1203 return ret;
1204 }
1205
1206 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1207 fmode_t fmode)
1208 {
1209 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1210 case FMODE_READ|FMODE_WRITE:
1211 return state->n_rdwr != 0;
1212 case FMODE_WRITE:
1213 return state->n_wronly != 0;
1214 case FMODE_READ:
1215 return state->n_rdonly != 0;
1216 }
1217 WARN_ON_ONCE(1);
1218 return false;
1219 }
1220
1221 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1222 {
1223 int ret = 0;
1224
1225 if (open_mode & (O_EXCL|O_TRUNC))
1226 goto out;
1227 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1228 case FMODE_READ:
1229 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1230 && state->n_rdonly != 0;
1231 break;
1232 case FMODE_WRITE:
1233 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1234 && state->n_wronly != 0;
1235 break;
1236 case FMODE_READ|FMODE_WRITE:
1237 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1238 && state->n_rdwr != 0;
1239 }
1240 out:
1241 return ret;
1242 }
1243
1244 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1245 enum open_claim_type4 claim)
1246 {
1247 if (delegation == NULL)
1248 return 0;
1249 if ((delegation->type & fmode) != fmode)
1250 return 0;
1251 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1252 return 0;
1253 switch (claim) {
1254 case NFS4_OPEN_CLAIM_NULL:
1255 case NFS4_OPEN_CLAIM_FH:
1256 break;
1257 case NFS4_OPEN_CLAIM_PREVIOUS:
1258 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1259 break;
1260 default:
1261 return 0;
1262 }
1263 nfs_mark_delegation_referenced(delegation);
1264 return 1;
1265 }
1266
1267 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1268 {
1269 switch (fmode) {
1270 case FMODE_WRITE:
1271 state->n_wronly++;
1272 break;
1273 case FMODE_READ:
1274 state->n_rdonly++;
1275 break;
1276 case FMODE_READ|FMODE_WRITE:
1277 state->n_rdwr++;
1278 }
1279 nfs4_state_set_mode_locked(state, state->state | fmode);
1280 }
1281
1282 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1283 {
1284 struct nfs_client *clp = state->owner->so_server->nfs_client;
1285 bool need_recover = false;
1286
1287 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1288 need_recover = true;
1289 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1290 need_recover = true;
1291 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1292 need_recover = true;
1293 if (need_recover)
1294 nfs4_state_mark_reclaim_nograce(clp, state);
1295 }
1296
1297 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1298 nfs4_stateid *stateid)
1299 {
1300 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1301 return true;
1302 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1303 nfs_test_and_clear_all_open_stateid(state);
1304 return true;
1305 }
1306 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1307 return true;
1308 return false;
1309 }
1310
1311 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1312 {
1313 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1314 return;
1315 if (state->n_wronly)
1316 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1317 if (state->n_rdonly)
1318 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1319 if (state->n_rdwr)
1320 set_bit(NFS_O_RDWR_STATE, &state->flags);
1321 set_bit(NFS_OPEN_STATE, &state->flags);
1322 }
1323
1324 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1325 nfs4_stateid *arg_stateid,
1326 nfs4_stateid *stateid, fmode_t fmode)
1327 {
1328 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1329 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1330 case FMODE_WRITE:
1331 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1332 break;
1333 case FMODE_READ:
1334 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1335 break;
1336 case 0:
1337 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1338 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1339 clear_bit(NFS_OPEN_STATE, &state->flags);
1340 }
1341 if (stateid == NULL)
1342 return;
1343 /* Handle races with OPEN */
1344 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1345 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1346 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1347 nfs_resync_open_stateid_locked(state);
1348 return;
1349 }
1350 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1351 nfs4_stateid_copy(&state->stateid, stateid);
1352 nfs4_stateid_copy(&state->open_stateid, stateid);
1353 }
1354
1355 static void nfs_clear_open_stateid(struct nfs4_state *state,
1356 nfs4_stateid *arg_stateid,
1357 nfs4_stateid *stateid, fmode_t fmode)
1358 {
1359 write_seqlock(&state->seqlock);
1360 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1361 write_sequnlock(&state->seqlock);
1362 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1363 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1364 }
1365
1366 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1367 {
1368 switch (fmode) {
1369 case FMODE_READ:
1370 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1371 break;
1372 case FMODE_WRITE:
1373 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1374 break;
1375 case FMODE_READ|FMODE_WRITE:
1376 set_bit(NFS_O_RDWR_STATE, &state->flags);
1377 }
1378 if (!nfs_need_update_open_stateid(state, stateid))
1379 return;
1380 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1381 nfs4_stateid_copy(&state->stateid, stateid);
1382 nfs4_stateid_copy(&state->open_stateid, stateid);
1383 }
1384
1385 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1386 {
1387 /*
1388 * Protect the call to nfs4_state_set_mode_locked and
1389 * serialise the stateid update
1390 */
1391 spin_lock(&state->owner->so_lock);
1392 write_seqlock(&state->seqlock);
1393 if (deleg_stateid != NULL) {
1394 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1395 set_bit(NFS_DELEGATED_STATE, &state->flags);
1396 }
1397 if (open_stateid != NULL)
1398 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1399 write_sequnlock(&state->seqlock);
1400 update_open_stateflags(state, fmode);
1401 spin_unlock(&state->owner->so_lock);
1402 }
1403
1404 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1405 {
1406 struct nfs_inode *nfsi = NFS_I(state->inode);
1407 struct nfs_delegation *deleg_cur;
1408 int ret = 0;
1409
1410 fmode &= (FMODE_READ|FMODE_WRITE);
1411
1412 rcu_read_lock();
1413 deleg_cur = rcu_dereference(nfsi->delegation);
1414 if (deleg_cur == NULL)
1415 goto no_delegation;
1416
1417 spin_lock(&deleg_cur->lock);
1418 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1419 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1420 (deleg_cur->type & fmode) != fmode)
1421 goto no_delegation_unlock;
1422
1423 if (delegation == NULL)
1424 delegation = &deleg_cur->stateid;
1425 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1426 goto no_delegation_unlock;
1427
1428 nfs_mark_delegation_referenced(deleg_cur);
1429 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1430 ret = 1;
1431 no_delegation_unlock:
1432 spin_unlock(&deleg_cur->lock);
1433 no_delegation:
1434 rcu_read_unlock();
1435
1436 if (!ret && open_stateid != NULL) {
1437 __update_open_stateid(state, open_stateid, NULL, fmode);
1438 ret = 1;
1439 }
1440 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1441 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1442
1443 return ret;
1444 }
1445
1446 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1447 const nfs4_stateid *stateid)
1448 {
1449 struct nfs4_state *state = lsp->ls_state;
1450 bool ret = false;
1451
1452 spin_lock(&state->state_lock);
1453 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1454 goto out_noupdate;
1455 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1456 goto out_noupdate;
1457 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1458 ret = true;
1459 out_noupdate:
1460 spin_unlock(&state->state_lock);
1461 return ret;
1462 }
1463
1464 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1465 {
1466 struct nfs_delegation *delegation;
1467
1468 rcu_read_lock();
1469 delegation = rcu_dereference(NFS_I(inode)->delegation);
1470 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1471 rcu_read_unlock();
1472 return;
1473 }
1474 rcu_read_unlock();
1475 nfs4_inode_return_delegation(inode);
1476 }
1477
1478 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1479 {
1480 struct nfs4_state *state = opendata->state;
1481 struct nfs_inode *nfsi = NFS_I(state->inode);
1482 struct nfs_delegation *delegation;
1483 int open_mode = opendata->o_arg.open_flags;
1484 fmode_t fmode = opendata->o_arg.fmode;
1485 enum open_claim_type4 claim = opendata->o_arg.claim;
1486 nfs4_stateid stateid;
1487 int ret = -EAGAIN;
1488
1489 for (;;) {
1490 spin_lock(&state->owner->so_lock);
1491 if (can_open_cached(state, fmode, open_mode)) {
1492 update_open_stateflags(state, fmode);
1493 spin_unlock(&state->owner->so_lock);
1494 goto out_return_state;
1495 }
1496 spin_unlock(&state->owner->so_lock);
1497 rcu_read_lock();
1498 delegation = rcu_dereference(nfsi->delegation);
1499 if (!can_open_delegated(delegation, fmode, claim)) {
1500 rcu_read_unlock();
1501 break;
1502 }
1503 /* Save the delegation */
1504 nfs4_stateid_copy(&stateid, &delegation->stateid);
1505 rcu_read_unlock();
1506 nfs_release_seqid(opendata->o_arg.seqid);
1507 if (!opendata->is_recover) {
1508 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1509 if (ret != 0)
1510 goto out;
1511 }
1512 ret = -EAGAIN;
1513
1514 /* Try to update the stateid using the delegation */
1515 if (update_open_stateid(state, NULL, &stateid, fmode))
1516 goto out_return_state;
1517 }
1518 out:
1519 return ERR_PTR(ret);
1520 out_return_state:
1521 atomic_inc(&state->count);
1522 return state;
1523 }
1524
1525 static void
1526 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1527 {
1528 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1529 struct nfs_delegation *delegation;
1530 int delegation_flags = 0;
1531
1532 rcu_read_lock();
1533 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1534 if (delegation)
1535 delegation_flags = delegation->flags;
1536 rcu_read_unlock();
1537 switch (data->o_arg.claim) {
1538 default:
1539 break;
1540 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1541 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1542 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1543 "returning a delegation for "
1544 "OPEN(CLAIM_DELEGATE_CUR)\n",
1545 clp->cl_hostname);
1546 return;
1547 }
1548 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1549 nfs_inode_set_delegation(state->inode,
1550 data->owner->so_cred,
1551 &data->o_res);
1552 else
1553 nfs_inode_reclaim_delegation(state->inode,
1554 data->owner->so_cred,
1555 &data->o_res);
1556 }
1557
1558 /*
1559 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1560 * and update the nfs4_state.
1561 */
1562 static struct nfs4_state *
1563 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1564 {
1565 struct inode *inode = data->state->inode;
1566 struct nfs4_state *state = data->state;
1567 int ret;
1568
1569 if (!data->rpc_done) {
1570 if (data->rpc_status) {
1571 ret = data->rpc_status;
1572 goto err;
1573 }
1574 /* cached opens have already been processed */
1575 goto update;
1576 }
1577
1578 ret = nfs_refresh_inode(inode, &data->f_attr);
1579 if (ret)
1580 goto err;
1581
1582 if (data->o_res.delegation_type != 0)
1583 nfs4_opendata_check_deleg(data, state);
1584 update:
1585 update_open_stateid(state, &data->o_res.stateid, NULL,
1586 data->o_arg.fmode);
1587 atomic_inc(&state->count);
1588
1589 return state;
1590 err:
1591 return ERR_PTR(ret);
1592
1593 }
1594
1595 static struct nfs4_state *
1596 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1597 {
1598 struct inode *inode;
1599 struct nfs4_state *state = NULL;
1600 int ret;
1601
1602 if (!data->rpc_done) {
1603 state = nfs4_try_open_cached(data);
1604 trace_nfs4_cached_open(data->state);
1605 goto out;
1606 }
1607
1608 ret = -EAGAIN;
1609 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1610 goto err;
1611 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1612 ret = PTR_ERR(inode);
1613 if (IS_ERR(inode))
1614 goto err;
1615 ret = -ENOMEM;
1616 state = nfs4_get_open_state(inode, data->owner);
1617 if (state == NULL)
1618 goto err_put_inode;
1619 if (data->o_res.delegation_type != 0)
1620 nfs4_opendata_check_deleg(data, state);
1621 update_open_stateid(state, &data->o_res.stateid, NULL,
1622 data->o_arg.fmode);
1623 iput(inode);
1624 out:
1625 nfs_release_seqid(data->o_arg.seqid);
1626 return state;
1627 err_put_inode:
1628 iput(inode);
1629 err:
1630 return ERR_PTR(ret);
1631 }
1632
1633 static struct nfs4_state *
1634 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1635 {
1636 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1637 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1638 return _nfs4_opendata_to_nfs4_state(data);
1639 }
1640
1641 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1642 {
1643 struct nfs_inode *nfsi = NFS_I(state->inode);
1644 struct nfs_open_context *ctx;
1645
1646 spin_lock(&state->inode->i_lock);
1647 list_for_each_entry(ctx, &nfsi->open_files, list) {
1648 if (ctx->state != state)
1649 continue;
1650 get_nfs_open_context(ctx);
1651 spin_unlock(&state->inode->i_lock);
1652 return ctx;
1653 }
1654 spin_unlock(&state->inode->i_lock);
1655 return ERR_PTR(-ENOENT);
1656 }
1657
1658 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1659 struct nfs4_state *state, enum open_claim_type4 claim)
1660 {
1661 struct nfs4_opendata *opendata;
1662
1663 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1664 NULL, NULL, claim, GFP_NOFS);
1665 if (opendata == NULL)
1666 return ERR_PTR(-ENOMEM);
1667 opendata->state = state;
1668 atomic_inc(&state->count);
1669 return opendata;
1670 }
1671
1672 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1673 fmode_t fmode)
1674 {
1675 struct nfs4_state *newstate;
1676 int ret;
1677
1678 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1679 return 0;
1680 opendata->o_arg.open_flags = 0;
1681 opendata->o_arg.fmode = fmode;
1682 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1683 NFS_SB(opendata->dentry->d_sb),
1684 fmode, 0);
1685 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1686 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1687 nfs4_init_opendata_res(opendata);
1688 ret = _nfs4_recover_proc_open(opendata);
1689 if (ret != 0)
1690 return ret;
1691 newstate = nfs4_opendata_to_nfs4_state(opendata);
1692 if (IS_ERR(newstate))
1693 return PTR_ERR(newstate);
1694 if (newstate != opendata->state)
1695 ret = -ESTALE;
1696 nfs4_close_state(newstate, fmode);
1697 return ret;
1698 }
1699
1700 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1701 {
1702 int ret;
1703
1704 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1705 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1706 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1707 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1708 /* memory barrier prior to reading state->n_* */
1709 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1710 clear_bit(NFS_OPEN_STATE, &state->flags);
1711 smp_rmb();
1712 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1713 if (ret != 0)
1714 return ret;
1715 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1716 if (ret != 0)
1717 return ret;
1718 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1719 if (ret != 0)
1720 return ret;
1721 /*
1722 * We may have performed cached opens for all three recoveries.
1723 * Check if we need to update the current stateid.
1724 */
1725 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1726 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1727 write_seqlock(&state->seqlock);
1728 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1729 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1730 write_sequnlock(&state->seqlock);
1731 }
1732 return 0;
1733 }
1734
1735 /*
1736 * OPEN_RECLAIM:
1737 * reclaim state on the server after a reboot.
1738 */
1739 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1740 {
1741 struct nfs_delegation *delegation;
1742 struct nfs4_opendata *opendata;
1743 fmode_t delegation_type = 0;
1744 int status;
1745
1746 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1747 NFS4_OPEN_CLAIM_PREVIOUS);
1748 if (IS_ERR(opendata))
1749 return PTR_ERR(opendata);
1750 rcu_read_lock();
1751 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1752 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1753 delegation_type = delegation->type;
1754 rcu_read_unlock();
1755 opendata->o_arg.u.delegation_type = delegation_type;
1756 status = nfs4_open_recover(opendata, state);
1757 nfs4_opendata_put(opendata);
1758 return status;
1759 }
1760
1761 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1762 {
1763 struct nfs_server *server = NFS_SERVER(state->inode);
1764 struct nfs4_exception exception = { };
1765 int err;
1766 do {
1767 err = _nfs4_do_open_reclaim(ctx, state);
1768 trace_nfs4_open_reclaim(ctx, 0, err);
1769 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1770 continue;
1771 if (err != -NFS4ERR_DELAY)
1772 break;
1773 nfs4_handle_exception(server, err, &exception);
1774 } while (exception.retry);
1775 return err;
1776 }
1777
1778 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1779 {
1780 struct nfs_open_context *ctx;
1781 int ret;
1782
1783 ctx = nfs4_state_find_open_context(state);
1784 if (IS_ERR(ctx))
1785 return -EAGAIN;
1786 ret = nfs4_do_open_reclaim(ctx, state);
1787 put_nfs_open_context(ctx);
1788 return ret;
1789 }
1790
1791 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1792 {
1793 switch (err) {
1794 default:
1795 printk(KERN_ERR "NFS: %s: unhandled error "
1796 "%d.\n", __func__, err);
1797 case 0:
1798 case -ENOENT:
1799 case -EAGAIN:
1800 case -ESTALE:
1801 break;
1802 case -NFS4ERR_BADSESSION:
1803 case -NFS4ERR_BADSLOT:
1804 case -NFS4ERR_BAD_HIGH_SLOT:
1805 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1806 case -NFS4ERR_DEADSESSION:
1807 set_bit(NFS_DELEGATED_STATE, &state->flags);
1808 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1809 return -EAGAIN;
1810 case -NFS4ERR_STALE_CLIENTID:
1811 case -NFS4ERR_STALE_STATEID:
1812 set_bit(NFS_DELEGATED_STATE, &state->flags);
1813 case -NFS4ERR_EXPIRED:
1814 /* Don't recall a delegation if it was lost */
1815 nfs4_schedule_lease_recovery(server->nfs_client);
1816 return -EAGAIN;
1817 case -NFS4ERR_MOVED:
1818 nfs4_schedule_migration_recovery(server);
1819 return -EAGAIN;
1820 case -NFS4ERR_LEASE_MOVED:
1821 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1822 return -EAGAIN;
1823 case -NFS4ERR_DELEG_REVOKED:
1824 case -NFS4ERR_ADMIN_REVOKED:
1825 case -NFS4ERR_BAD_STATEID:
1826 case -NFS4ERR_OPENMODE:
1827 nfs_inode_find_state_and_recover(state->inode,
1828 stateid);
1829 nfs4_schedule_stateid_recovery(server, state);
1830 return -EAGAIN;
1831 case -NFS4ERR_DELAY:
1832 case -NFS4ERR_GRACE:
1833 set_bit(NFS_DELEGATED_STATE, &state->flags);
1834 ssleep(1);
1835 return -EAGAIN;
1836 case -ENOMEM:
1837 case -NFS4ERR_DENIED:
1838 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1839 return 0;
1840 }
1841 return err;
1842 }
1843
1844 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1845 struct nfs4_state *state, const nfs4_stateid *stateid,
1846 fmode_t type)
1847 {
1848 struct nfs_server *server = NFS_SERVER(state->inode);
1849 struct nfs4_opendata *opendata;
1850 int err = 0;
1851
1852 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1853 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1854 if (IS_ERR(opendata))
1855 return PTR_ERR(opendata);
1856 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1857 write_seqlock(&state->seqlock);
1858 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1859 write_sequnlock(&state->seqlock);
1860 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1861 switch (type & (FMODE_READ|FMODE_WRITE)) {
1862 case FMODE_READ|FMODE_WRITE:
1863 case FMODE_WRITE:
1864 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1865 if (err)
1866 break;
1867 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1868 if (err)
1869 break;
1870 case FMODE_READ:
1871 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1872 }
1873 nfs4_opendata_put(opendata);
1874 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1875 }
1876
1877 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1878 {
1879 struct nfs4_opendata *data = calldata;
1880
1881 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1882 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1883 }
1884
1885 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1886 {
1887 struct nfs4_opendata *data = calldata;
1888
1889 nfs40_sequence_done(task, &data->c_res.seq_res);
1890
1891 data->rpc_status = task->tk_status;
1892 if (data->rpc_status == 0) {
1893 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1894 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1895 renew_lease(data->o_res.server, data->timestamp);
1896 data->rpc_done = 1;
1897 }
1898 }
1899
1900 static void nfs4_open_confirm_release(void *calldata)
1901 {
1902 struct nfs4_opendata *data = calldata;
1903 struct nfs4_state *state = NULL;
1904
1905 /* If this request hasn't been cancelled, do nothing */
1906 if (data->cancelled == 0)
1907 goto out_free;
1908 /* In case of error, no cleanup! */
1909 if (!data->rpc_done)
1910 goto out_free;
1911 state = nfs4_opendata_to_nfs4_state(data);
1912 if (!IS_ERR(state))
1913 nfs4_close_state(state, data->o_arg.fmode);
1914 out_free:
1915 nfs4_opendata_put(data);
1916 }
1917
1918 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1919 .rpc_call_prepare = nfs4_open_confirm_prepare,
1920 .rpc_call_done = nfs4_open_confirm_done,
1921 .rpc_release = nfs4_open_confirm_release,
1922 };
1923
1924 /*
1925 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1926 */
1927 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1928 {
1929 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1930 struct rpc_task *task;
1931 struct rpc_message msg = {
1932 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1933 .rpc_argp = &data->c_arg,
1934 .rpc_resp = &data->c_res,
1935 .rpc_cred = data->owner->so_cred,
1936 };
1937 struct rpc_task_setup task_setup_data = {
1938 .rpc_client = server->client,
1939 .rpc_message = &msg,
1940 .callback_ops = &nfs4_open_confirm_ops,
1941 .callback_data = data,
1942 .workqueue = nfsiod_workqueue,
1943 .flags = RPC_TASK_ASYNC,
1944 };
1945 int status;
1946
1947 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1948 kref_get(&data->kref);
1949 data->rpc_done = 0;
1950 data->rpc_status = 0;
1951 data->timestamp = jiffies;
1952 if (data->is_recover)
1953 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
1954 task = rpc_run_task(&task_setup_data);
1955 if (IS_ERR(task))
1956 return PTR_ERR(task);
1957 status = nfs4_wait_for_completion_rpc_task(task);
1958 if (status != 0) {
1959 data->cancelled = 1;
1960 smp_wmb();
1961 } else
1962 status = data->rpc_status;
1963 rpc_put_task(task);
1964 return status;
1965 }
1966
1967 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1968 {
1969 struct nfs4_opendata *data = calldata;
1970 struct nfs4_state_owner *sp = data->owner;
1971 struct nfs_client *clp = sp->so_server->nfs_client;
1972 enum open_claim_type4 claim = data->o_arg.claim;
1973
1974 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1975 goto out_wait;
1976 /*
1977 * Check if we still need to send an OPEN call, or if we can use
1978 * a delegation instead.
1979 */
1980 if (data->state != NULL) {
1981 struct nfs_delegation *delegation;
1982
1983 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1984 goto out_no_action;
1985 rcu_read_lock();
1986 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1987 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
1988 goto unlock_no_action;
1989 rcu_read_unlock();
1990 }
1991 /* Update client id. */
1992 data->o_arg.clientid = clp->cl_clientid;
1993 switch (claim) {
1994 default:
1995 break;
1996 case NFS4_OPEN_CLAIM_PREVIOUS:
1997 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1998 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1999 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2000 case NFS4_OPEN_CLAIM_FH:
2001 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2002 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
2003 }
2004 data->timestamp = jiffies;
2005 if (nfs4_setup_sequence(data->o_arg.server,
2006 &data->o_arg.seq_args,
2007 &data->o_res.seq_res,
2008 task) != 0)
2009 nfs_release_seqid(data->o_arg.seqid);
2010
2011 /* Set the create mode (note dependency on the session type) */
2012 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2013 if (data->o_arg.open_flags & O_EXCL) {
2014 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2015 if (nfs4_has_persistent_session(clp))
2016 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2017 else if (clp->cl_mvops->minor_version > 0)
2018 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2019 }
2020 return;
2021 unlock_no_action:
2022 trace_nfs4_cached_open(data->state);
2023 rcu_read_unlock();
2024 out_no_action:
2025 task->tk_action = NULL;
2026 out_wait:
2027 nfs4_sequence_done(task, &data->o_res.seq_res);
2028 }
2029
2030 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2031 {
2032 struct nfs4_opendata *data = calldata;
2033
2034 data->rpc_status = task->tk_status;
2035
2036 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
2037 return;
2038
2039 if (task->tk_status == 0) {
2040 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2041 switch (data->o_res.f_attr->mode & S_IFMT) {
2042 case S_IFREG:
2043 break;
2044 case S_IFLNK:
2045 data->rpc_status = -ELOOP;
2046 break;
2047 case S_IFDIR:
2048 data->rpc_status = -EISDIR;
2049 break;
2050 default:
2051 data->rpc_status = -ENOTDIR;
2052 }
2053 }
2054 renew_lease(data->o_res.server, data->timestamp);
2055 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2056 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2057 }
2058 data->rpc_done = 1;
2059 }
2060
2061 static void nfs4_open_release(void *calldata)
2062 {
2063 struct nfs4_opendata *data = calldata;
2064 struct nfs4_state *state = NULL;
2065
2066 /* If this request hasn't been cancelled, do nothing */
2067 if (data->cancelled == 0)
2068 goto out_free;
2069 /* In case of error, no cleanup! */
2070 if (data->rpc_status != 0 || !data->rpc_done)
2071 goto out_free;
2072 /* In case we need an open_confirm, no cleanup! */
2073 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2074 goto out_free;
2075 state = nfs4_opendata_to_nfs4_state(data);
2076 if (!IS_ERR(state))
2077 nfs4_close_state(state, data->o_arg.fmode);
2078 out_free:
2079 nfs4_opendata_put(data);
2080 }
2081
2082 static const struct rpc_call_ops nfs4_open_ops = {
2083 .rpc_call_prepare = nfs4_open_prepare,
2084 .rpc_call_done = nfs4_open_done,
2085 .rpc_release = nfs4_open_release,
2086 };
2087
2088 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2089 {
2090 struct inode *dir = d_inode(data->dir);
2091 struct nfs_server *server = NFS_SERVER(dir);
2092 struct nfs_openargs *o_arg = &data->o_arg;
2093 struct nfs_openres *o_res = &data->o_res;
2094 struct rpc_task *task;
2095 struct rpc_message msg = {
2096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2097 .rpc_argp = o_arg,
2098 .rpc_resp = o_res,
2099 .rpc_cred = data->owner->so_cred,
2100 };
2101 struct rpc_task_setup task_setup_data = {
2102 .rpc_client = server->client,
2103 .rpc_message = &msg,
2104 .callback_ops = &nfs4_open_ops,
2105 .callback_data = data,
2106 .workqueue = nfsiod_workqueue,
2107 .flags = RPC_TASK_ASYNC,
2108 };
2109 int status;
2110
2111 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2112 kref_get(&data->kref);
2113 data->rpc_done = 0;
2114 data->rpc_status = 0;
2115 data->cancelled = 0;
2116 data->is_recover = 0;
2117 if (isrecover) {
2118 nfs4_set_sequence_privileged(&o_arg->seq_args);
2119 data->is_recover = 1;
2120 }
2121 task = rpc_run_task(&task_setup_data);
2122 if (IS_ERR(task))
2123 return PTR_ERR(task);
2124 status = nfs4_wait_for_completion_rpc_task(task);
2125 if (status != 0) {
2126 data->cancelled = 1;
2127 smp_wmb();
2128 } else
2129 status = data->rpc_status;
2130 rpc_put_task(task);
2131
2132 return status;
2133 }
2134
2135 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2136 {
2137 struct inode *dir = d_inode(data->dir);
2138 struct nfs_openres *o_res = &data->o_res;
2139 int status;
2140
2141 status = nfs4_run_open_task(data, 1);
2142 if (status != 0 || !data->rpc_done)
2143 return status;
2144
2145 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2146
2147 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2148 status = _nfs4_proc_open_confirm(data);
2149 if (status != 0)
2150 return status;
2151 }
2152
2153 return status;
2154 }
2155
2156 /*
2157 * Additional permission checks in order to distinguish between an
2158 * open for read, and an open for execute. This works around the
2159 * fact that NFSv4 OPEN treats read and execute permissions as being
2160 * the same.
2161 * Note that in the non-execute case, we want to turn off permission
2162 * checking if we just created a new file (POSIX open() semantics).
2163 */
2164 static int nfs4_opendata_access(struct rpc_cred *cred,
2165 struct nfs4_opendata *opendata,
2166 struct nfs4_state *state, fmode_t fmode,
2167 int openflags)
2168 {
2169 struct nfs_access_entry cache;
2170 u32 mask;
2171
2172 /* access call failed or for some reason the server doesn't
2173 * support any access modes -- defer access call until later */
2174 if (opendata->o_res.access_supported == 0)
2175 return 0;
2176
2177 mask = 0;
2178 /*
2179 * Use openflags to check for exec, because fmode won't
2180 * always have FMODE_EXEC set when file open for exec.
2181 */
2182 if (openflags & __FMODE_EXEC) {
2183 /* ONLY check for exec rights */
2184 mask = MAY_EXEC;
2185 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2186 mask = MAY_READ;
2187
2188 cache.cred = cred;
2189 cache.jiffies = jiffies;
2190 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2191 nfs_access_add_cache(state->inode, &cache);
2192
2193 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2194 return 0;
2195
2196 /* even though OPEN succeeded, access is denied. Close the file */
2197 nfs4_close_state(state, fmode);
2198 return -EACCES;
2199 }
2200
2201 /*
2202 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2203 */
2204 static int _nfs4_proc_open(struct nfs4_opendata *data)
2205 {
2206 struct inode *dir = d_inode(data->dir);
2207 struct nfs_server *server = NFS_SERVER(dir);
2208 struct nfs_openargs *o_arg = &data->o_arg;
2209 struct nfs_openres *o_res = &data->o_res;
2210 int status;
2211
2212 status = nfs4_run_open_task(data, 0);
2213 if (!data->rpc_done)
2214 return status;
2215 if (status != 0) {
2216 if (status == -NFS4ERR_BADNAME &&
2217 !(o_arg->open_flags & O_CREAT))
2218 return -ENOENT;
2219 return status;
2220 }
2221
2222 nfs_fattr_map_and_free_names(server, &data->f_attr);
2223
2224 if (o_arg->open_flags & O_CREAT) {
2225 update_changeattr(dir, &o_res->cinfo);
2226 if (o_arg->open_flags & O_EXCL)
2227 data->file_created = 1;
2228 else if (o_res->cinfo.before != o_res->cinfo.after)
2229 data->file_created = 1;
2230 }
2231 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2232 server->caps &= ~NFS_CAP_POSIX_LOCK;
2233 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2234 status = _nfs4_proc_open_confirm(data);
2235 if (status != 0)
2236 return status;
2237 }
2238 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2239 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2240 return 0;
2241 }
2242
2243 static int nfs4_recover_expired_lease(struct nfs_server *server)
2244 {
2245 return nfs4_client_recover_expired_lease(server->nfs_client);
2246 }
2247
2248 /*
2249 * OPEN_EXPIRED:
2250 * reclaim state on the server after a network partition.
2251 * Assumes caller holds the appropriate lock
2252 */
2253 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2254 {
2255 struct nfs4_opendata *opendata;
2256 int ret;
2257
2258 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2259 NFS4_OPEN_CLAIM_FH);
2260 if (IS_ERR(opendata))
2261 return PTR_ERR(opendata);
2262 ret = nfs4_open_recover(opendata, state);
2263 if (ret == -ESTALE)
2264 d_drop(ctx->dentry);
2265 nfs4_opendata_put(opendata);
2266 return ret;
2267 }
2268
2269 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2270 {
2271 struct nfs_server *server = NFS_SERVER(state->inode);
2272 struct nfs4_exception exception = { };
2273 int err;
2274
2275 do {
2276 err = _nfs4_open_expired(ctx, state);
2277 trace_nfs4_open_expired(ctx, 0, err);
2278 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2279 continue;
2280 switch (err) {
2281 default:
2282 goto out;
2283 case -NFS4ERR_GRACE:
2284 case -NFS4ERR_DELAY:
2285 nfs4_handle_exception(server, err, &exception);
2286 err = 0;
2287 }
2288 } while (exception.retry);
2289 out:
2290 return err;
2291 }
2292
2293 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2294 {
2295 struct nfs_open_context *ctx;
2296 int ret;
2297
2298 ctx = nfs4_state_find_open_context(state);
2299 if (IS_ERR(ctx))
2300 return -EAGAIN;
2301 ret = nfs4_do_open_expired(ctx, state);
2302 put_nfs_open_context(ctx);
2303 return ret;
2304 }
2305
2306 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2307 {
2308 nfs_remove_bad_delegation(state->inode);
2309 write_seqlock(&state->seqlock);
2310 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2311 write_sequnlock(&state->seqlock);
2312 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2313 }
2314
2315 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2316 {
2317 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2318 nfs_finish_clear_delegation_stateid(state);
2319 }
2320
2321 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2322 {
2323 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2324 nfs40_clear_delegation_stateid(state);
2325 return nfs4_open_expired(sp, state);
2326 }
2327
2328 #if defined(CONFIG_NFS_V4_1)
2329 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2330 {
2331 struct nfs_server *server = NFS_SERVER(state->inode);
2332 nfs4_stateid stateid;
2333 struct nfs_delegation *delegation;
2334 struct rpc_cred *cred;
2335 int status;
2336
2337 /* Get the delegation credential for use by test/free_stateid */
2338 rcu_read_lock();
2339 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2340 if (delegation == NULL) {
2341 rcu_read_unlock();
2342 return;
2343 }
2344
2345 nfs4_stateid_copy(&stateid, &delegation->stateid);
2346 cred = get_rpccred(delegation->cred);
2347 rcu_read_unlock();
2348 status = nfs41_test_stateid(server, &stateid, cred);
2349 trace_nfs4_test_delegation_stateid(state, NULL, status);
2350
2351 if (status != NFS_OK) {
2352 /* Free the stateid unless the server explicitly
2353 * informs us the stateid is unrecognized. */
2354 if (status != -NFS4ERR_BAD_STATEID)
2355 nfs41_free_stateid(server, &stateid, cred);
2356 nfs_finish_clear_delegation_stateid(state);
2357 }
2358
2359 put_rpccred(cred);
2360 }
2361
2362 /**
2363 * nfs41_check_open_stateid - possibly free an open stateid
2364 *
2365 * @state: NFSv4 state for an inode
2366 *
2367 * Returns NFS_OK if recovery for this stateid is now finished.
2368 * Otherwise a negative NFS4ERR value is returned.
2369 */
2370 static int nfs41_check_open_stateid(struct nfs4_state *state)
2371 {
2372 struct nfs_server *server = NFS_SERVER(state->inode);
2373 nfs4_stateid *stateid = &state->open_stateid;
2374 struct rpc_cred *cred = state->owner->so_cred;
2375 int status;
2376
2377 /* If a state reset has been done, test_stateid is unneeded */
2378 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2379 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2380 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2381 return -NFS4ERR_BAD_STATEID;
2382
2383 status = nfs41_test_stateid(server, stateid, cred);
2384 trace_nfs4_test_open_stateid(state, NULL, status);
2385 if (status != NFS_OK) {
2386 /* Free the stateid unless the server explicitly
2387 * informs us the stateid is unrecognized. */
2388 if (status != -NFS4ERR_BAD_STATEID)
2389 nfs41_free_stateid(server, stateid, cred);
2390
2391 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2392 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2393 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2394 clear_bit(NFS_OPEN_STATE, &state->flags);
2395 }
2396 return status;
2397 }
2398
2399 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2400 {
2401 int status;
2402
2403 nfs41_check_delegation_stateid(state);
2404 status = nfs41_check_open_stateid(state);
2405 if (status != NFS_OK)
2406 status = nfs4_open_expired(sp, state);
2407 return status;
2408 }
2409 #endif
2410
2411 /*
2412 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2413 * fields corresponding to attributes that were used to store the verifier.
2414 * Make sure we clobber those fields in the later setattr call
2415 */
2416 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2417 struct iattr *sattr, struct nfs4_label **label)
2418 {
2419 const u32 *attrset = opendata->o_res.attrset;
2420
2421 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2422 !(sattr->ia_valid & ATTR_ATIME_SET))
2423 sattr->ia_valid |= ATTR_ATIME;
2424
2425 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2426 !(sattr->ia_valid & ATTR_MTIME_SET))
2427 sattr->ia_valid |= ATTR_MTIME;
2428
2429 /* Except MODE, it seems harmless of setting twice. */
2430 if ((attrset[1] & FATTR4_WORD1_MODE))
2431 sattr->ia_valid &= ~ATTR_MODE;
2432
2433 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2434 *label = NULL;
2435 }
2436
2437 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2438 fmode_t fmode,
2439 int flags,
2440 struct nfs_open_context *ctx)
2441 {
2442 struct nfs4_state_owner *sp = opendata->owner;
2443 struct nfs_server *server = sp->so_server;
2444 struct dentry *dentry;
2445 struct nfs4_state *state;
2446 unsigned int seq;
2447 int ret;
2448
2449 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2450
2451 ret = _nfs4_proc_open(opendata);
2452 if (ret != 0)
2453 goto out;
2454
2455 state = nfs4_opendata_to_nfs4_state(opendata);
2456 ret = PTR_ERR(state);
2457 if (IS_ERR(state))
2458 goto out;
2459 if (server->caps & NFS_CAP_POSIX_LOCK)
2460 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2461
2462 dentry = opendata->dentry;
2463 if (d_really_is_negative(dentry)) {
2464 struct dentry *alias;
2465 d_drop(dentry);
2466 alias = d_exact_alias(dentry, state->inode);
2467 if (!alias)
2468 alias = d_splice_alias(igrab(state->inode), dentry);
2469 /* d_splice_alias() can't fail here - it's a non-directory */
2470 if (alias) {
2471 dput(ctx->dentry);
2472 ctx->dentry = dentry = alias;
2473 }
2474 nfs_set_verifier(dentry,
2475 nfs_save_change_attribute(d_inode(opendata->dir)));
2476 }
2477
2478 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2479 if (ret != 0)
2480 goto out;
2481
2482 ctx->state = state;
2483 if (d_inode(dentry) == state->inode) {
2484 nfs_inode_attach_open_context(ctx);
2485 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2486 nfs4_schedule_stateid_recovery(server, state);
2487 }
2488 out:
2489 return ret;
2490 }
2491
2492 /*
2493 * Returns a referenced nfs4_state
2494 */
2495 static int _nfs4_do_open(struct inode *dir,
2496 struct nfs_open_context *ctx,
2497 int flags,
2498 struct iattr *sattr,
2499 struct nfs4_label *label,
2500 int *opened)
2501 {
2502 struct nfs4_state_owner *sp;
2503 struct nfs4_state *state = NULL;
2504 struct nfs_server *server = NFS_SERVER(dir);
2505 struct nfs4_opendata *opendata;
2506 struct dentry *dentry = ctx->dentry;
2507 struct rpc_cred *cred = ctx->cred;
2508 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2509 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2510 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2511 struct nfs4_label *olabel = NULL;
2512 int status;
2513
2514 /* Protect against reboot recovery conflicts */
2515 status = -ENOMEM;
2516 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2517 if (sp == NULL) {
2518 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2519 goto out_err;
2520 }
2521 status = nfs4_recover_expired_lease(server);
2522 if (status != 0)
2523 goto err_put_state_owner;
2524 if (d_really_is_positive(dentry))
2525 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2526 status = -ENOMEM;
2527 if (d_really_is_positive(dentry))
2528 claim = NFS4_OPEN_CLAIM_FH;
2529 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2530 label, claim, GFP_KERNEL);
2531 if (opendata == NULL)
2532 goto err_put_state_owner;
2533
2534 if (label) {
2535 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2536 if (IS_ERR(olabel)) {
2537 status = PTR_ERR(olabel);
2538 goto err_opendata_put;
2539 }
2540 }
2541
2542 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2543 if (!opendata->f_attr.mdsthreshold) {
2544 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2545 if (!opendata->f_attr.mdsthreshold)
2546 goto err_free_label;
2547 }
2548 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2549 }
2550 if (d_really_is_positive(dentry))
2551 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2552
2553 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2554 if (status != 0)
2555 goto err_free_label;
2556 state = ctx->state;
2557
2558 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2559 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2560 nfs4_exclusive_attrset(opendata, sattr, &label);
2561
2562 nfs_fattr_init(opendata->o_res.f_attr);
2563 status = nfs4_do_setattr(state->inode, cred,
2564 opendata->o_res.f_attr, sattr,
2565 state, label, olabel);
2566 if (status == 0) {
2567 nfs_setattr_update_inode(state->inode, sattr,
2568 opendata->o_res.f_attr);
2569 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2570 }
2571 }
2572 if (opened && opendata->file_created)
2573 *opened |= FILE_CREATED;
2574
2575 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2576 *ctx_th = opendata->f_attr.mdsthreshold;
2577 opendata->f_attr.mdsthreshold = NULL;
2578 }
2579
2580 nfs4_label_free(olabel);
2581
2582 nfs4_opendata_put(opendata);
2583 nfs4_put_state_owner(sp);
2584 return 0;
2585 err_free_label:
2586 nfs4_label_free(olabel);
2587 err_opendata_put:
2588 nfs4_opendata_put(opendata);
2589 err_put_state_owner:
2590 nfs4_put_state_owner(sp);
2591 out_err:
2592 return status;
2593 }
2594
2595
2596 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2597 struct nfs_open_context *ctx,
2598 int flags,
2599 struct iattr *sattr,
2600 struct nfs4_label *label,
2601 int *opened)
2602 {
2603 struct nfs_server *server = NFS_SERVER(dir);
2604 struct nfs4_exception exception = { };
2605 struct nfs4_state *res;
2606 int status;
2607
2608 do {
2609 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2610 res = ctx->state;
2611 trace_nfs4_open_file(ctx, flags, status);
2612 if (status == 0)
2613 break;
2614 /* NOTE: BAD_SEQID means the server and client disagree about the
2615 * book-keeping w.r.t. state-changing operations
2616 * (OPEN/CLOSE/LOCK/LOCKU...)
2617 * It is actually a sign of a bug on the client or on the server.
2618 *
2619 * If we receive a BAD_SEQID error in the particular case of
2620 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2621 * have unhashed the old state_owner for us, and that we can
2622 * therefore safely retry using a new one. We should still warn
2623 * the user though...
2624 */
2625 if (status == -NFS4ERR_BAD_SEQID) {
2626 pr_warn_ratelimited("NFS: v4 server %s "
2627 " returned a bad sequence-id error!\n",
2628 NFS_SERVER(dir)->nfs_client->cl_hostname);
2629 exception.retry = 1;
2630 continue;
2631 }
2632 /*
2633 * BAD_STATEID on OPEN means that the server cancelled our
2634 * state before it received the OPEN_CONFIRM.
2635 * Recover by retrying the request as per the discussion
2636 * on Page 181 of RFC3530.
2637 */
2638 if (status == -NFS4ERR_BAD_STATEID) {
2639 exception.retry = 1;
2640 continue;
2641 }
2642 if (status == -EAGAIN) {
2643 /* We must have found a delegation */
2644 exception.retry = 1;
2645 continue;
2646 }
2647 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2648 continue;
2649 res = ERR_PTR(nfs4_handle_exception(server,
2650 status, &exception));
2651 } while (exception.retry);
2652 return res;
2653 }
2654
2655 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2656 struct nfs_fattr *fattr, struct iattr *sattr,
2657 struct nfs4_state *state, struct nfs4_label *ilabel,
2658 struct nfs4_label *olabel)
2659 {
2660 struct nfs_server *server = NFS_SERVER(inode);
2661 struct nfs_setattrargs arg = {
2662 .fh = NFS_FH(inode),
2663 .iap = sattr,
2664 .server = server,
2665 .bitmask = server->attr_bitmask,
2666 .label = ilabel,
2667 };
2668 struct nfs_setattrres res = {
2669 .fattr = fattr,
2670 .label = olabel,
2671 .server = server,
2672 };
2673 struct rpc_message msg = {
2674 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2675 .rpc_argp = &arg,
2676 .rpc_resp = &res,
2677 .rpc_cred = cred,
2678 };
2679 unsigned long timestamp = jiffies;
2680 fmode_t fmode;
2681 bool truncate;
2682 int status;
2683
2684 arg.bitmask = nfs4_bitmask(server, ilabel);
2685 if (ilabel)
2686 arg.bitmask = nfs4_bitmask(server, olabel);
2687
2688 nfs_fattr_init(fattr);
2689
2690 /* Servers should only apply open mode checks for file size changes */
2691 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2692 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2693
2694 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2695 /* Use that stateid */
2696 } else if (truncate && state != NULL) {
2697 struct nfs_lockowner lockowner = {
2698 .l_owner = current->files,
2699 .l_pid = current->tgid,
2700 };
2701 if (!nfs4_valid_open_stateid(state))
2702 return -EBADF;
2703 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2704 &lockowner) == -EIO)
2705 return -EBADF;
2706 } else
2707 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2708
2709 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2710 if (status == 0 && state != NULL)
2711 renew_lease(server, timestamp);
2712 trace_nfs4_setattr(inode, &arg.stateid, status);
2713 return status;
2714 }
2715
2716 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2717 struct nfs_fattr *fattr, struct iattr *sattr,
2718 struct nfs4_state *state, struct nfs4_label *ilabel,
2719 struct nfs4_label *olabel)
2720 {
2721 struct nfs_server *server = NFS_SERVER(inode);
2722 struct nfs4_exception exception = {
2723 .state = state,
2724 .inode = inode,
2725 };
2726 int err;
2727 do {
2728 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2729 switch (err) {
2730 case -NFS4ERR_OPENMODE:
2731 if (!(sattr->ia_valid & ATTR_SIZE)) {
2732 pr_warn_once("NFSv4: server %s is incorrectly "
2733 "applying open mode checks to "
2734 "a SETATTR that is not "
2735 "changing file size.\n",
2736 server->nfs_client->cl_hostname);
2737 }
2738 if (state && !(state->state & FMODE_WRITE)) {
2739 err = -EBADF;
2740 if (sattr->ia_valid & ATTR_OPEN)
2741 err = -EACCES;
2742 goto out;
2743 }
2744 }
2745 err = nfs4_handle_exception(server, err, &exception);
2746 } while (exception.retry);
2747 out:
2748 return err;
2749 }
2750
2751 static bool
2752 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2753 {
2754 if (inode == NULL || !nfs_have_layout(inode))
2755 return false;
2756
2757 return pnfs_wait_on_layoutreturn(inode, task);
2758 }
2759
2760 struct nfs4_closedata {
2761 struct inode *inode;
2762 struct nfs4_state *state;
2763 struct nfs_closeargs arg;
2764 struct nfs_closeres res;
2765 struct nfs_fattr fattr;
2766 unsigned long timestamp;
2767 bool roc;
2768 u32 roc_barrier;
2769 };
2770
2771 static void nfs4_free_closedata(void *data)
2772 {
2773 struct nfs4_closedata *calldata = data;
2774 struct nfs4_state_owner *sp = calldata->state->owner;
2775 struct super_block *sb = calldata->state->inode->i_sb;
2776
2777 if (calldata->roc)
2778 pnfs_roc_release(calldata->state->inode);
2779 nfs4_put_open_state(calldata->state);
2780 nfs_free_seqid(calldata->arg.seqid);
2781 nfs4_put_state_owner(sp);
2782 nfs_sb_deactive(sb);
2783 kfree(calldata);
2784 }
2785
2786 static void nfs4_close_done(struct rpc_task *task, void *data)
2787 {
2788 struct nfs4_closedata *calldata = data;
2789 struct nfs4_state *state = calldata->state;
2790 struct nfs_server *server = NFS_SERVER(calldata->inode);
2791 nfs4_stateid *res_stateid = NULL;
2792
2793 dprintk("%s: begin!\n", __func__);
2794 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2795 return;
2796 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2797 /* hmm. we are done with the inode, and in the process of freeing
2798 * the state_owner. we keep this around to process errors
2799 */
2800 switch (task->tk_status) {
2801 case 0:
2802 res_stateid = &calldata->res.stateid;
2803 if (calldata->roc)
2804 pnfs_roc_set_barrier(state->inode,
2805 calldata->roc_barrier);
2806 renew_lease(server, calldata->timestamp);
2807 break;
2808 case -NFS4ERR_ADMIN_REVOKED:
2809 case -NFS4ERR_STALE_STATEID:
2810 case -NFS4ERR_OLD_STATEID:
2811 case -NFS4ERR_BAD_STATEID:
2812 case -NFS4ERR_EXPIRED:
2813 if (!nfs4_stateid_match(&calldata->arg.stateid,
2814 &state->open_stateid)) {
2815 rpc_restart_call_prepare(task);
2816 goto out_release;
2817 }
2818 if (calldata->arg.fmode == 0)
2819 break;
2820 default:
2821 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2822 rpc_restart_call_prepare(task);
2823 goto out_release;
2824 }
2825 }
2826 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2827 res_stateid, calldata->arg.fmode);
2828 out_release:
2829 nfs_release_seqid(calldata->arg.seqid);
2830 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2831 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2832 }
2833
2834 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2835 {
2836 struct nfs4_closedata *calldata = data;
2837 struct nfs4_state *state = calldata->state;
2838 struct inode *inode = calldata->inode;
2839 bool is_rdonly, is_wronly, is_rdwr;
2840 int call_close = 0;
2841
2842 dprintk("%s: begin!\n", __func__);
2843 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2844 goto out_wait;
2845
2846 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2847 spin_lock(&state->owner->so_lock);
2848 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2849 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2850 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2851 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2852 /* Calculate the change in open mode */
2853 calldata->arg.fmode = 0;
2854 if (state->n_rdwr == 0) {
2855 if (state->n_rdonly == 0)
2856 call_close |= is_rdonly;
2857 else if (is_rdonly)
2858 calldata->arg.fmode |= FMODE_READ;
2859 if (state->n_wronly == 0)
2860 call_close |= is_wronly;
2861 else if (is_wronly)
2862 calldata->arg.fmode |= FMODE_WRITE;
2863 } else if (is_rdwr)
2864 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2865
2866 if (calldata->arg.fmode == 0)
2867 call_close |= is_rdwr;
2868
2869 if (!nfs4_valid_open_stateid(state))
2870 call_close = 0;
2871 spin_unlock(&state->owner->so_lock);
2872
2873 if (!call_close) {
2874 /* Note: exit _without_ calling nfs4_close_done */
2875 goto out_no_action;
2876 }
2877
2878 if (nfs4_wait_on_layoutreturn(inode, task)) {
2879 nfs_release_seqid(calldata->arg.seqid);
2880 goto out_wait;
2881 }
2882
2883 if (calldata->arg.fmode == 0)
2884 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2885 if (calldata->roc)
2886 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2887
2888 calldata->arg.share_access =
2889 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2890 calldata->arg.fmode, 0);
2891
2892 nfs_fattr_init(calldata->res.fattr);
2893 calldata->timestamp = jiffies;
2894 if (nfs4_setup_sequence(NFS_SERVER(inode),
2895 &calldata->arg.seq_args,
2896 &calldata->res.seq_res,
2897 task) != 0)
2898 nfs_release_seqid(calldata->arg.seqid);
2899 dprintk("%s: done!\n", __func__);
2900 return;
2901 out_no_action:
2902 task->tk_action = NULL;
2903 out_wait:
2904 nfs4_sequence_done(task, &calldata->res.seq_res);
2905 }
2906
2907 static const struct rpc_call_ops nfs4_close_ops = {
2908 .rpc_call_prepare = nfs4_close_prepare,
2909 .rpc_call_done = nfs4_close_done,
2910 .rpc_release = nfs4_free_closedata,
2911 };
2912
2913 static bool nfs4_roc(struct inode *inode)
2914 {
2915 if (!nfs_have_layout(inode))
2916 return false;
2917 return pnfs_roc(inode);
2918 }
2919
2920 /*
2921 * It is possible for data to be read/written from a mem-mapped file
2922 * after the sys_close call (which hits the vfs layer as a flush).
2923 * This means that we can't safely call nfsv4 close on a file until
2924 * the inode is cleared. This in turn means that we are not good
2925 * NFSv4 citizens - we do not indicate to the server to update the file's
2926 * share state even when we are done with one of the three share
2927 * stateid's in the inode.
2928 *
2929 * NOTE: Caller must be holding the sp->so_owner semaphore!
2930 */
2931 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2932 {
2933 struct nfs_server *server = NFS_SERVER(state->inode);
2934 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2935 struct nfs4_closedata *calldata;
2936 struct nfs4_state_owner *sp = state->owner;
2937 struct rpc_task *task;
2938 struct rpc_message msg = {
2939 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2940 .rpc_cred = state->owner->so_cred,
2941 };
2942 struct rpc_task_setup task_setup_data = {
2943 .rpc_client = server->client,
2944 .rpc_message = &msg,
2945 .callback_ops = &nfs4_close_ops,
2946 .workqueue = nfsiod_workqueue,
2947 .flags = RPC_TASK_ASYNC,
2948 };
2949 int status = -ENOMEM;
2950
2951 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2952 &task_setup_data.rpc_client, &msg);
2953
2954 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2955 if (calldata == NULL)
2956 goto out;
2957 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2958 calldata->inode = state->inode;
2959 calldata->state = state;
2960 calldata->arg.fh = NFS_FH(state->inode);
2961 /* Serialization for the sequence id */
2962 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2963 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2964 if (IS_ERR(calldata->arg.seqid))
2965 goto out_free_calldata;
2966 calldata->arg.fmode = 0;
2967 calldata->arg.bitmask = server->cache_consistency_bitmask;
2968 calldata->res.fattr = &calldata->fattr;
2969 calldata->res.seqid = calldata->arg.seqid;
2970 calldata->res.server = server;
2971 calldata->roc = nfs4_roc(state->inode);
2972 nfs_sb_active(calldata->inode->i_sb);
2973
2974 msg.rpc_argp = &calldata->arg;
2975 msg.rpc_resp = &calldata->res;
2976 task_setup_data.callback_data = calldata;
2977 task = rpc_run_task(&task_setup_data);
2978 if (IS_ERR(task))
2979 return PTR_ERR(task);
2980 status = 0;
2981 if (wait)
2982 status = rpc_wait_for_completion_task(task);
2983 rpc_put_task(task);
2984 return status;
2985 out_free_calldata:
2986 kfree(calldata);
2987 out:
2988 nfs4_put_open_state(state);
2989 nfs4_put_state_owner(sp);
2990 return status;
2991 }
2992
2993 static struct inode *
2994 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2995 int open_flags, struct iattr *attr, int *opened)
2996 {
2997 struct nfs4_state *state;
2998 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2999
3000 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3001
3002 /* Protect against concurrent sillydeletes */
3003 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3004
3005 nfs4_label_release_security(label);
3006
3007 if (IS_ERR(state))
3008 return ERR_CAST(state);
3009 return state->inode;
3010 }
3011
3012 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3013 {
3014 if (ctx->state == NULL)
3015 return;
3016 if (is_sync)
3017 nfs4_close_sync(ctx->state, ctx->mode);
3018 else
3019 nfs4_close_state(ctx->state, ctx->mode);
3020 }
3021
3022 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3023 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3024 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
3025
3026 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3027 {
3028 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3029 struct nfs4_server_caps_arg args = {
3030 .fhandle = fhandle,
3031 .bitmask = bitmask,
3032 };
3033 struct nfs4_server_caps_res res = {};
3034 struct rpc_message msg = {
3035 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3036 .rpc_argp = &args,
3037 .rpc_resp = &res,
3038 };
3039 int status;
3040
3041 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3042 FATTR4_WORD0_FH_EXPIRE_TYPE |
3043 FATTR4_WORD0_LINK_SUPPORT |
3044 FATTR4_WORD0_SYMLINK_SUPPORT |
3045 FATTR4_WORD0_ACLSUPPORT;
3046 if (minorversion)
3047 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3048
3049 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3050 if (status == 0) {
3051 /* Sanity check the server answers */
3052 switch (minorversion) {
3053 case 0:
3054 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3055 res.attr_bitmask[2] = 0;
3056 break;
3057 case 1:
3058 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3059 break;
3060 case 2:
3061 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3062 }
3063 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3064 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3065 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3066 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3067 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3068 NFS_CAP_CTIME|NFS_CAP_MTIME|
3069 NFS_CAP_SECURITY_LABEL);
3070 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3071 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3072 server->caps |= NFS_CAP_ACLS;
3073 if (res.has_links != 0)
3074 server->caps |= NFS_CAP_HARDLINKS;
3075 if (res.has_symlinks != 0)
3076 server->caps |= NFS_CAP_SYMLINKS;
3077 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3078 server->caps |= NFS_CAP_FILEID;
3079 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3080 server->caps |= NFS_CAP_MODE;
3081 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3082 server->caps |= NFS_CAP_NLINK;
3083 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3084 server->caps |= NFS_CAP_OWNER;
3085 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3086 server->caps |= NFS_CAP_OWNER_GROUP;
3087 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3088 server->caps |= NFS_CAP_ATIME;
3089 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3090 server->caps |= NFS_CAP_CTIME;
3091 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3092 server->caps |= NFS_CAP_MTIME;
3093 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3094 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3095 server->caps |= NFS_CAP_SECURITY_LABEL;
3096 #endif
3097 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3098 sizeof(server->attr_bitmask));
3099 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3100
3101 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3102 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3103 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3104 server->cache_consistency_bitmask[2] = 0;
3105 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3106 sizeof(server->exclcreat_bitmask));
3107 server->acl_bitmask = res.acl_bitmask;
3108 server->fh_expire_type = res.fh_expire_type;
3109 }
3110
3111 return status;
3112 }
3113
3114 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3115 {
3116 struct nfs4_exception exception = { };
3117 int err;
3118 do {
3119 err = nfs4_handle_exception(server,
3120 _nfs4_server_capabilities(server, fhandle),
3121 &exception);
3122 } while (exception.retry);
3123 return err;
3124 }
3125
3126 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3127 struct nfs_fsinfo *info)
3128 {
3129 u32 bitmask[3];
3130 struct nfs4_lookup_root_arg args = {
3131 .bitmask = bitmask,
3132 };
3133 struct nfs4_lookup_res res = {
3134 .server = server,
3135 .fattr = info->fattr,
3136 .fh = fhandle,
3137 };
3138 struct rpc_message msg = {
3139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3140 .rpc_argp = &args,
3141 .rpc_resp = &res,
3142 };
3143
3144 bitmask[0] = nfs4_fattr_bitmap[0];
3145 bitmask[1] = nfs4_fattr_bitmap[1];
3146 /*
3147 * Process the label in the upcoming getfattr
3148 */
3149 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3150
3151 nfs_fattr_init(info->fattr);
3152 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3153 }
3154
3155 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3156 struct nfs_fsinfo *info)
3157 {
3158 struct nfs4_exception exception = { };
3159 int err;
3160 do {
3161 err = _nfs4_lookup_root(server, fhandle, info);
3162 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3163 switch (err) {
3164 case 0:
3165 case -NFS4ERR_WRONGSEC:
3166 goto out;
3167 default:
3168 err = nfs4_handle_exception(server, err, &exception);
3169 }
3170 } while (exception.retry);
3171 out:
3172 return err;
3173 }
3174
3175 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3176 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3177 {
3178 struct rpc_auth_create_args auth_args = {
3179 .pseudoflavor = flavor,
3180 };
3181 struct rpc_auth *auth;
3182 int ret;
3183
3184 auth = rpcauth_create(&auth_args, server->client);
3185 if (IS_ERR(auth)) {
3186 ret = -EACCES;
3187 goto out;
3188 }
3189 ret = nfs4_lookup_root(server, fhandle, info);
3190 out:
3191 return ret;
3192 }
3193
3194 /*
3195 * Retry pseudoroot lookup with various security flavors. We do this when:
3196 *
3197 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3198 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3199 *
3200 * Returns zero on success, or a negative NFS4ERR value, or a
3201 * negative errno value.
3202 */
3203 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3204 struct nfs_fsinfo *info)
3205 {
3206 /* Per 3530bis 15.33.5 */
3207 static const rpc_authflavor_t flav_array[] = {
3208 RPC_AUTH_GSS_KRB5P,
3209 RPC_AUTH_GSS_KRB5I,
3210 RPC_AUTH_GSS_KRB5,
3211 RPC_AUTH_UNIX, /* courtesy */
3212 RPC_AUTH_NULL,
3213 };
3214 int status = -EPERM;
3215 size_t i;
3216
3217 if (server->auth_info.flavor_len > 0) {
3218 /* try each flavor specified by user */
3219 for (i = 0; i < server->auth_info.flavor_len; i++) {
3220 status = nfs4_lookup_root_sec(server, fhandle, info,
3221 server->auth_info.flavors[i]);
3222 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3223 continue;
3224 break;
3225 }
3226 } else {
3227 /* no flavors specified by user, try default list */
3228 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3229 status = nfs4_lookup_root_sec(server, fhandle, info,
3230 flav_array[i]);
3231 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3232 continue;
3233 break;
3234 }
3235 }
3236
3237 /*
3238 * -EACCESS could mean that the user doesn't have correct permissions
3239 * to access the mount. It could also mean that we tried to mount
3240 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3241 * existing mount programs don't handle -EACCES very well so it should
3242 * be mapped to -EPERM instead.
3243 */
3244 if (status == -EACCES)
3245 status = -EPERM;
3246 return status;
3247 }
3248
3249 static int nfs4_do_find_root_sec(struct nfs_server *server,
3250 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3251 {
3252 int mv = server->nfs_client->cl_minorversion;
3253 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3254 }
3255
3256 /**
3257 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3258 * @server: initialized nfs_server handle
3259 * @fhandle: we fill in the pseudo-fs root file handle
3260 * @info: we fill in an FSINFO struct
3261 * @auth_probe: probe the auth flavours
3262 *
3263 * Returns zero on success, or a negative errno.
3264 */
3265 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3266 struct nfs_fsinfo *info,
3267 bool auth_probe)
3268 {
3269 int status = 0;
3270
3271 if (!auth_probe)
3272 status = nfs4_lookup_root(server, fhandle, info);
3273
3274 if (auth_probe || status == NFS4ERR_WRONGSEC)
3275 status = nfs4_do_find_root_sec(server, fhandle, info);
3276
3277 if (status == 0)
3278 status = nfs4_server_capabilities(server, fhandle);
3279 if (status == 0)
3280 status = nfs4_do_fsinfo(server, fhandle, info);
3281
3282 return nfs4_map_errors(status);
3283 }
3284
3285 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3286 struct nfs_fsinfo *info)
3287 {
3288 int error;
3289 struct nfs_fattr *fattr = info->fattr;
3290 struct nfs4_label *label = NULL;
3291
3292 error = nfs4_server_capabilities(server, mntfh);
3293 if (error < 0) {
3294 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3295 return error;
3296 }
3297
3298 label = nfs4_label_alloc(server, GFP_KERNEL);
3299 if (IS_ERR(label))
3300 return PTR_ERR(label);
3301
3302 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3303 if (error < 0) {
3304 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3305 goto err_free_label;
3306 }
3307
3308 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3309 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3310 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3311
3312 err_free_label:
3313 nfs4_label_free(label);
3314
3315 return error;
3316 }
3317
3318 /*
3319 * Get locations and (maybe) other attributes of a referral.
3320 * Note that we'll actually follow the referral later when
3321 * we detect fsid mismatch in inode revalidation
3322 */
3323 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3324 const struct qstr *name, struct nfs_fattr *fattr,
3325 struct nfs_fh *fhandle)
3326 {
3327 int status = -ENOMEM;
3328 struct page *page = NULL;
3329 struct nfs4_fs_locations *locations = NULL;
3330
3331 page = alloc_page(GFP_KERNEL);
3332 if (page == NULL)
3333 goto out;
3334 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3335 if (locations == NULL)
3336 goto out;
3337
3338 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3339 if (status != 0)
3340 goto out;
3341
3342 /*
3343 * If the fsid didn't change, this is a migration event, not a
3344 * referral. Cause us to drop into the exception handler, which
3345 * will kick off migration recovery.
3346 */
3347 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3348 dprintk("%s: server did not return a different fsid for"
3349 " a referral at %s\n", __func__, name->name);
3350 status = -NFS4ERR_MOVED;
3351 goto out;
3352 }
3353 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3354 nfs_fixup_referral_attributes(&locations->fattr);
3355
3356 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3357 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3358 memset(fhandle, 0, sizeof(struct nfs_fh));
3359 out:
3360 if (page)
3361 __free_page(page);
3362 kfree(locations);
3363 return status;
3364 }
3365
3366 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3367 struct nfs_fattr *fattr, struct nfs4_label *label)
3368 {
3369 struct nfs4_getattr_arg args = {
3370 .fh = fhandle,
3371 .bitmask = server->attr_bitmask,
3372 };
3373 struct nfs4_getattr_res res = {
3374 .fattr = fattr,
3375 .label = label,
3376 .server = server,
3377 };
3378 struct rpc_message msg = {
3379 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3380 .rpc_argp = &args,
3381 .rpc_resp = &res,
3382 };
3383
3384 args.bitmask = nfs4_bitmask(server, label);
3385
3386 nfs_fattr_init(fattr);
3387 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3388 }
3389
3390 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3391 struct nfs_fattr *fattr, struct nfs4_label *label)
3392 {
3393 struct nfs4_exception exception = { };
3394 int err;
3395 do {
3396 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3397 trace_nfs4_getattr(server, fhandle, fattr, err);
3398 err = nfs4_handle_exception(server, err,
3399 &exception);
3400 } while (exception.retry);
3401 return err;
3402 }
3403
3404 /*
3405 * The file is not closed if it is opened due to the a request to change
3406 * the size of the file. The open call will not be needed once the
3407 * VFS layer lookup-intents are implemented.
3408 *
3409 * Close is called when the inode is destroyed.
3410 * If we haven't opened the file for O_WRONLY, we
3411 * need to in the size_change case to obtain a stateid.
3412 *
3413 * Got race?
3414 * Because OPEN is always done by name in nfsv4, it is
3415 * possible that we opened a different file by the same
3416 * name. We can recognize this race condition, but we
3417 * can't do anything about it besides returning an error.
3418 *
3419 * This will be fixed with VFS changes (lookup-intent).
3420 */
3421 static int
3422 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3423 struct iattr *sattr)
3424 {
3425 struct inode *inode = d_inode(dentry);
3426 struct rpc_cred *cred = NULL;
3427 struct nfs4_state *state = NULL;
3428 struct nfs4_label *label = NULL;
3429 int status;
3430
3431 if (pnfs_ld_layoutret_on_setattr(inode) &&
3432 sattr->ia_valid & ATTR_SIZE &&
3433 sattr->ia_size < i_size_read(inode))
3434 pnfs_commit_and_return_layout(inode);
3435
3436 nfs_fattr_init(fattr);
3437
3438 /* Deal with open(O_TRUNC) */
3439 if (sattr->ia_valid & ATTR_OPEN)
3440 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3441
3442 /* Optimization: if the end result is no change, don't RPC */
3443 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3444 return 0;
3445
3446 /* Search for an existing open(O_WRITE) file */
3447 if (sattr->ia_valid & ATTR_FILE) {
3448 struct nfs_open_context *ctx;
3449
3450 ctx = nfs_file_open_context(sattr->ia_file);
3451 if (ctx) {
3452 cred = ctx->cred;
3453 state = ctx->state;
3454 }
3455 }
3456
3457 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3458 if (IS_ERR(label))
3459 return PTR_ERR(label);
3460
3461 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3462 if (status == 0) {
3463 nfs_setattr_update_inode(inode, sattr, fattr);
3464 nfs_setsecurity(inode, fattr, label);
3465 }
3466 nfs4_label_free(label);
3467 return status;
3468 }
3469
3470 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3471 const struct qstr *name, struct nfs_fh *fhandle,
3472 struct nfs_fattr *fattr, struct nfs4_label *label)
3473 {
3474 struct nfs_server *server = NFS_SERVER(dir);
3475 int status;
3476 struct nfs4_lookup_arg args = {
3477 .bitmask = server->attr_bitmask,
3478 .dir_fh = NFS_FH(dir),
3479 .name = name,
3480 };
3481 struct nfs4_lookup_res res = {
3482 .server = server,
3483 .fattr = fattr,
3484 .label = label,
3485 .fh = fhandle,
3486 };
3487 struct rpc_message msg = {
3488 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3489 .rpc_argp = &args,
3490 .rpc_resp = &res,
3491 };
3492
3493 args.bitmask = nfs4_bitmask(server, label);
3494
3495 nfs_fattr_init(fattr);
3496
3497 dprintk("NFS call lookup %s\n", name->name);
3498 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3499 dprintk("NFS reply lookup: %d\n", status);
3500 return status;
3501 }
3502
3503 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3504 {
3505 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3506 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3507 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3508 fattr->nlink = 2;
3509 }
3510
3511 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3512 struct qstr *name, struct nfs_fh *fhandle,
3513 struct nfs_fattr *fattr, struct nfs4_label *label)
3514 {
3515 struct nfs4_exception exception = { };
3516 struct rpc_clnt *client = *clnt;
3517 int err;
3518 do {
3519 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3520 trace_nfs4_lookup(dir, name, err);
3521 switch (err) {
3522 case -NFS4ERR_BADNAME:
3523 err = -ENOENT;
3524 goto out;
3525 case -NFS4ERR_MOVED:
3526 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3527 if (err == -NFS4ERR_MOVED)
3528 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3529 goto out;
3530 case -NFS4ERR_WRONGSEC:
3531 err = -EPERM;
3532 if (client != *clnt)
3533 goto out;
3534 client = nfs4_negotiate_security(client, dir, name);
3535 if (IS_ERR(client))
3536 return PTR_ERR(client);
3537
3538 exception.retry = 1;
3539 break;
3540 default:
3541 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3542 }
3543 } while (exception.retry);
3544
3545 out:
3546 if (err == 0)
3547 *clnt = client;
3548 else if (client != *clnt)
3549 rpc_shutdown_client(client);
3550
3551 return err;
3552 }
3553
3554 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3555 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3556 struct nfs4_label *label)
3557 {
3558 int status;
3559 struct rpc_clnt *client = NFS_CLIENT(dir);
3560
3561 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3562 if (client != NFS_CLIENT(dir)) {
3563 rpc_shutdown_client(client);
3564 nfs_fixup_secinfo_attributes(fattr);
3565 }
3566 return status;
3567 }
3568
3569 struct rpc_clnt *
3570 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3571 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3572 {
3573 struct rpc_clnt *client = NFS_CLIENT(dir);
3574 int status;
3575
3576 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3577 if (status < 0)
3578 return ERR_PTR(status);
3579 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3580 }
3581
3582 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3583 {
3584 struct nfs_server *server = NFS_SERVER(inode);
3585 struct nfs4_accessargs args = {
3586 .fh = NFS_FH(inode),
3587 .bitmask = server->cache_consistency_bitmask,
3588 };
3589 struct nfs4_accessres res = {
3590 .server = server,
3591 };
3592 struct rpc_message msg = {
3593 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3594 .rpc_argp = &args,
3595 .rpc_resp = &res,
3596 .rpc_cred = entry->cred,
3597 };
3598 int mode = entry->mask;
3599 int status = 0;
3600
3601 /*
3602 * Determine which access bits we want to ask for...
3603 */
3604 if (mode & MAY_READ)
3605 args.access |= NFS4_ACCESS_READ;
3606 if (S_ISDIR(inode->i_mode)) {
3607 if (mode & MAY_WRITE)
3608 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3609 if (mode & MAY_EXEC)
3610 args.access |= NFS4_ACCESS_LOOKUP;
3611 } else {
3612 if (mode & MAY_WRITE)
3613 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3614 if (mode & MAY_EXEC)
3615 args.access |= NFS4_ACCESS_EXECUTE;
3616 }
3617
3618 res.fattr = nfs_alloc_fattr();
3619 if (res.fattr == NULL)
3620 return -ENOMEM;
3621
3622 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3623 if (!status) {
3624 nfs_access_set_mask(entry, res.access);
3625 nfs_refresh_inode(inode, res.fattr);
3626 }
3627 nfs_free_fattr(res.fattr);
3628 return status;
3629 }
3630
3631 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3632 {
3633 struct nfs4_exception exception = { };
3634 int err;
3635 do {
3636 err = _nfs4_proc_access(inode, entry);
3637 trace_nfs4_access(inode, err);
3638 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3639 &exception);
3640 } while (exception.retry);
3641 return err;
3642 }
3643
3644 /*
3645 * TODO: For the time being, we don't try to get any attributes
3646 * along with any of the zero-copy operations READ, READDIR,
3647 * READLINK, WRITE.
3648 *
3649 * In the case of the first three, we want to put the GETATTR
3650 * after the read-type operation -- this is because it is hard
3651 * to predict the length of a GETATTR response in v4, and thus
3652 * align the READ data correctly. This means that the GETATTR
3653 * may end up partially falling into the page cache, and we should
3654 * shift it into the 'tail' of the xdr_buf before processing.
3655 * To do this efficiently, we need to know the total length
3656 * of data received, which doesn't seem to be available outside
3657 * of the RPC layer.
3658 *
3659 * In the case of WRITE, we also want to put the GETATTR after
3660 * the operation -- in this case because we want to make sure
3661 * we get the post-operation mtime and size.
3662 *
3663 * Both of these changes to the XDR layer would in fact be quite
3664 * minor, but I decided to leave them for a subsequent patch.
3665 */
3666 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3667 unsigned int pgbase, unsigned int pglen)
3668 {
3669 struct nfs4_readlink args = {
3670 .fh = NFS_FH(inode),
3671 .pgbase = pgbase,
3672 .pglen = pglen,
3673 .pages = &page,
3674 };
3675 struct nfs4_readlink_res res;
3676 struct rpc_message msg = {
3677 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3678 .rpc_argp = &args,
3679 .rpc_resp = &res,
3680 };
3681
3682 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3683 }
3684
3685 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3686 unsigned int pgbase, unsigned int pglen)
3687 {
3688 struct nfs4_exception exception = { };
3689 int err;
3690 do {
3691 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3692 trace_nfs4_readlink(inode, err);
3693 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3694 &exception);
3695 } while (exception.retry);
3696 return err;
3697 }
3698
3699 /*
3700 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3701 */
3702 static int
3703 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3704 int flags)
3705 {
3706 struct nfs4_label l, *ilabel = NULL;
3707 struct nfs_open_context *ctx;
3708 struct nfs4_state *state;
3709 int status = 0;
3710
3711 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3712 if (IS_ERR(ctx))
3713 return PTR_ERR(ctx);
3714
3715 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3716
3717 sattr->ia_mode &= ~current_umask();
3718 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3719 if (IS_ERR(state)) {
3720 status = PTR_ERR(state);
3721 goto out;
3722 }
3723 out:
3724 nfs4_label_release_security(ilabel);
3725 put_nfs_open_context(ctx);
3726 return status;
3727 }
3728
3729 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3730 {
3731 struct nfs_server *server = NFS_SERVER(dir);
3732 struct nfs_removeargs args = {
3733 .fh = NFS_FH(dir),
3734 .name = *name,
3735 };
3736 struct nfs_removeres res = {
3737 .server = server,
3738 };
3739 struct rpc_message msg = {
3740 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3741 .rpc_argp = &args,
3742 .rpc_resp = &res,
3743 };
3744 int status;
3745
3746 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3747 if (status == 0)
3748 update_changeattr(dir, &res.cinfo);
3749 return status;
3750 }
3751
3752 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3753 {
3754 struct nfs4_exception exception = { };
3755 int err;
3756 do {
3757 err = _nfs4_proc_remove(dir, name);
3758 trace_nfs4_remove(dir, name, err);
3759 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3760 &exception);
3761 } while (exception.retry);
3762 return err;
3763 }
3764
3765 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3766 {
3767 struct nfs_server *server = NFS_SERVER(dir);
3768 struct nfs_removeargs *args = msg->rpc_argp;
3769 struct nfs_removeres *res = msg->rpc_resp;
3770
3771 res->server = server;
3772 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3773 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3774
3775 nfs_fattr_init(res->dir_attr);
3776 }
3777
3778 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3779 {
3780 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb),
3781 &data->args.seq_args,
3782 &data->res.seq_res,
3783 task);
3784 }
3785
3786 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3787 {
3788 struct nfs_unlinkdata *data = task->tk_calldata;
3789 struct nfs_removeres *res = &data->res;
3790
3791 if (!nfs4_sequence_done(task, &res->seq_res))
3792 return 0;
3793 if (nfs4_async_handle_error(task, res->server, NULL,
3794 &data->timeout) == -EAGAIN)
3795 return 0;
3796 update_changeattr(dir, &res->cinfo);
3797 return 1;
3798 }
3799
3800 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3801 {
3802 struct nfs_server *server = NFS_SERVER(dir);
3803 struct nfs_renameargs *arg = msg->rpc_argp;
3804 struct nfs_renameres *res = msg->rpc_resp;
3805
3806 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3807 res->server = server;
3808 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3809 }
3810
3811 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3812 {
3813 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3814 &data->args.seq_args,
3815 &data->res.seq_res,
3816 task);
3817 }
3818
3819 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3820 struct inode *new_dir)
3821 {
3822 struct nfs_renamedata *data = task->tk_calldata;
3823 struct nfs_renameres *res = &data->res;
3824
3825 if (!nfs4_sequence_done(task, &res->seq_res))
3826 return 0;
3827 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3828 return 0;
3829
3830 update_changeattr(old_dir, &res->old_cinfo);
3831 update_changeattr(new_dir, &res->new_cinfo);
3832 return 1;
3833 }
3834
3835 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3836 {
3837 struct nfs_server *server = NFS_SERVER(inode);
3838 struct nfs4_link_arg arg = {
3839 .fh = NFS_FH(inode),
3840 .dir_fh = NFS_FH(dir),
3841 .name = name,
3842 .bitmask = server->attr_bitmask,
3843 };
3844 struct nfs4_link_res res = {
3845 .server = server,
3846 .label = NULL,
3847 };
3848 struct rpc_message msg = {
3849 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3850 .rpc_argp = &arg,
3851 .rpc_resp = &res,
3852 };
3853 int status = -ENOMEM;
3854
3855 res.fattr = nfs_alloc_fattr();
3856 if (res.fattr == NULL)
3857 goto out;
3858
3859 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3860 if (IS_ERR(res.label)) {
3861 status = PTR_ERR(res.label);
3862 goto out;
3863 }
3864 arg.bitmask = nfs4_bitmask(server, res.label);
3865
3866 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3867 if (!status) {
3868 update_changeattr(dir, &res.cinfo);
3869 status = nfs_post_op_update_inode(inode, res.fattr);
3870 if (!status)
3871 nfs_setsecurity(inode, res.fattr, res.label);
3872 }
3873
3874
3875 nfs4_label_free(res.label);
3876
3877 out:
3878 nfs_free_fattr(res.fattr);
3879 return status;
3880 }
3881
3882 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3883 {
3884 struct nfs4_exception exception = { };
3885 int err;
3886 do {
3887 err = nfs4_handle_exception(NFS_SERVER(inode),
3888 _nfs4_proc_link(inode, dir, name),
3889 &exception);
3890 } while (exception.retry);
3891 return err;
3892 }
3893
3894 struct nfs4_createdata {
3895 struct rpc_message msg;
3896 struct nfs4_create_arg arg;
3897 struct nfs4_create_res res;
3898 struct nfs_fh fh;
3899 struct nfs_fattr fattr;
3900 struct nfs4_label *label;
3901 };
3902
3903 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3904 struct qstr *name, struct iattr *sattr, u32 ftype)
3905 {
3906 struct nfs4_createdata *data;
3907
3908 data = kzalloc(sizeof(*data), GFP_KERNEL);
3909 if (data != NULL) {
3910 struct nfs_server *server = NFS_SERVER(dir);
3911
3912 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3913 if (IS_ERR(data->label))
3914 goto out_free;
3915
3916 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3917 data->msg.rpc_argp = &data->arg;
3918 data->msg.rpc_resp = &data->res;
3919 data->arg.dir_fh = NFS_FH(dir);
3920 data->arg.server = server;
3921 data->arg.name = name;
3922 data->arg.attrs = sattr;
3923 data->arg.ftype = ftype;
3924 data->arg.bitmask = nfs4_bitmask(server, data->label);
3925 data->res.server = server;
3926 data->res.fh = &data->fh;
3927 data->res.fattr = &data->fattr;
3928 data->res.label = data->label;
3929 nfs_fattr_init(data->res.fattr);
3930 }
3931 return data;
3932 out_free:
3933 kfree(data);
3934 return NULL;
3935 }
3936
3937 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3938 {
3939 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3940 &data->arg.seq_args, &data->res.seq_res, 1);
3941 if (status == 0) {
3942 update_changeattr(dir, &data->res.dir_cinfo);
3943 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3944 }
3945 return status;
3946 }
3947
3948 static void nfs4_free_createdata(struct nfs4_createdata *data)
3949 {
3950 nfs4_label_free(data->label);
3951 kfree(data);
3952 }
3953
3954 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3955 struct page *page, unsigned int len, struct iattr *sattr,
3956 struct nfs4_label *label)
3957 {
3958 struct nfs4_createdata *data;
3959 int status = -ENAMETOOLONG;
3960
3961 if (len > NFS4_MAXPATHLEN)
3962 goto out;
3963
3964 status = -ENOMEM;
3965 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3966 if (data == NULL)
3967 goto out;
3968
3969 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3970 data->arg.u.symlink.pages = &page;
3971 data->arg.u.symlink.len = len;
3972 data->arg.label = label;
3973
3974 status = nfs4_do_create(dir, dentry, data);
3975
3976 nfs4_free_createdata(data);
3977 out:
3978 return status;
3979 }
3980
3981 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3982 struct page *page, unsigned int len, struct iattr *sattr)
3983 {
3984 struct nfs4_exception exception = { };
3985 struct nfs4_label l, *label = NULL;
3986 int err;
3987
3988 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3989
3990 do {
3991 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3992 trace_nfs4_symlink(dir, &dentry->d_name, err);
3993 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3994 &exception);
3995 } while (exception.retry);
3996
3997 nfs4_label_release_security(label);
3998 return err;
3999 }
4000
4001 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4002 struct iattr *sattr, struct nfs4_label *label)
4003 {
4004 struct nfs4_createdata *data;
4005 int status = -ENOMEM;
4006
4007 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4008 if (data == NULL)
4009 goto out;
4010
4011 data->arg.label = label;
4012 status = nfs4_do_create(dir, dentry, data);
4013
4014 nfs4_free_createdata(data);
4015 out:
4016 return status;
4017 }
4018
4019 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4020 struct iattr *sattr)
4021 {
4022 struct nfs4_exception exception = { };
4023 struct nfs4_label l, *label = NULL;
4024 int err;
4025
4026 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4027
4028 sattr->ia_mode &= ~current_umask();
4029 do {
4030 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4031 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4032 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4033 &exception);
4034 } while (exception.retry);
4035 nfs4_label_release_security(label);
4036
4037 return err;
4038 }
4039
4040 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4041 u64 cookie, struct page **pages, unsigned int count, int plus)
4042 {
4043 struct inode *dir = d_inode(dentry);
4044 struct nfs4_readdir_arg args = {
4045 .fh = NFS_FH(dir),
4046 .pages = pages,
4047 .pgbase = 0,
4048 .count = count,
4049 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4050 .plus = plus,
4051 };
4052 struct nfs4_readdir_res res;
4053 struct rpc_message msg = {
4054 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4055 .rpc_argp = &args,
4056 .rpc_resp = &res,
4057 .rpc_cred = cred,
4058 };
4059 int status;
4060
4061 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4062 dentry,
4063 (unsigned long long)cookie);
4064 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4065 res.pgbase = args.pgbase;
4066 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4067 if (status >= 0) {
4068 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4069 status += args.pgbase;
4070 }
4071
4072 nfs_invalidate_atime(dir);
4073
4074 dprintk("%s: returns %d\n", __func__, status);
4075 return status;
4076 }
4077
4078 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4079 u64 cookie, struct page **pages, unsigned int count, int plus)
4080 {
4081 struct nfs4_exception exception = { };
4082 int err;
4083 do {
4084 err = _nfs4_proc_readdir(dentry, cred, cookie,
4085 pages, count, plus);
4086 trace_nfs4_readdir(d_inode(dentry), err);
4087 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4088 &exception);
4089 } while (exception.retry);
4090 return err;
4091 }
4092
4093 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4094 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4095 {
4096 struct nfs4_createdata *data;
4097 int mode = sattr->ia_mode;
4098 int status = -ENOMEM;
4099
4100 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4101 if (data == NULL)
4102 goto out;
4103
4104 if (S_ISFIFO(mode))
4105 data->arg.ftype = NF4FIFO;
4106 else if (S_ISBLK(mode)) {
4107 data->arg.ftype = NF4BLK;
4108 data->arg.u.device.specdata1 = MAJOR(rdev);
4109 data->arg.u.device.specdata2 = MINOR(rdev);
4110 }
4111 else if (S_ISCHR(mode)) {
4112 data->arg.ftype = NF4CHR;
4113 data->arg.u.device.specdata1 = MAJOR(rdev);
4114 data->arg.u.device.specdata2 = MINOR(rdev);
4115 } else if (!S_ISSOCK(mode)) {
4116 status = -EINVAL;
4117 goto out_free;
4118 }
4119
4120 data->arg.label = label;
4121 status = nfs4_do_create(dir, dentry, data);
4122 out_free:
4123 nfs4_free_createdata(data);
4124 out:
4125 return status;
4126 }
4127
4128 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4129 struct iattr *sattr, dev_t rdev)
4130 {
4131 struct nfs4_exception exception = { };
4132 struct nfs4_label l, *label = NULL;
4133 int err;
4134
4135 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4136
4137 sattr->ia_mode &= ~current_umask();
4138 do {
4139 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4140 trace_nfs4_mknod(dir, &dentry->d_name, err);
4141 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4142 &exception);
4143 } while (exception.retry);
4144
4145 nfs4_label_release_security(label);
4146
4147 return err;
4148 }
4149
4150 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4151 struct nfs_fsstat *fsstat)
4152 {
4153 struct nfs4_statfs_arg args = {
4154 .fh = fhandle,
4155 .bitmask = server->attr_bitmask,
4156 };
4157 struct nfs4_statfs_res res = {
4158 .fsstat = fsstat,
4159 };
4160 struct rpc_message msg = {
4161 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4162 .rpc_argp = &args,
4163 .rpc_resp = &res,
4164 };
4165
4166 nfs_fattr_init(fsstat->fattr);
4167 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4168 }
4169
4170 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4171 {
4172 struct nfs4_exception exception = { };
4173 int err;
4174 do {
4175 err = nfs4_handle_exception(server,
4176 _nfs4_proc_statfs(server, fhandle, fsstat),
4177 &exception);
4178 } while (exception.retry);
4179 return err;
4180 }
4181
4182 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4183 struct nfs_fsinfo *fsinfo)
4184 {
4185 struct nfs4_fsinfo_arg args = {
4186 .fh = fhandle,
4187 .bitmask = server->attr_bitmask,
4188 };
4189 struct nfs4_fsinfo_res res = {
4190 .fsinfo = fsinfo,
4191 };
4192 struct rpc_message msg = {
4193 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4194 .rpc_argp = &args,
4195 .rpc_resp = &res,
4196 };
4197
4198 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4199 }
4200
4201 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4202 {
4203 struct nfs4_exception exception = { };
4204 unsigned long now = jiffies;
4205 int err;
4206
4207 do {
4208 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4209 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4210 if (err == 0) {
4211 struct nfs_client *clp = server->nfs_client;
4212
4213 spin_lock(&clp->cl_lock);
4214 clp->cl_lease_time = fsinfo->lease_time * HZ;
4215 clp->cl_last_renewal = now;
4216 spin_unlock(&clp->cl_lock);
4217 break;
4218 }
4219 err = nfs4_handle_exception(server, err, &exception);
4220 } while (exception.retry);
4221 return err;
4222 }
4223
4224 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4225 {
4226 int error;
4227
4228 nfs_fattr_init(fsinfo->fattr);
4229 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4230 if (error == 0) {
4231 /* block layout checks this! */
4232 server->pnfs_blksize = fsinfo->blksize;
4233 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4234 }
4235
4236 return error;
4237 }
4238
4239 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4240 struct nfs_pathconf *pathconf)
4241 {
4242 struct nfs4_pathconf_arg args = {
4243 .fh = fhandle,
4244 .bitmask = server->attr_bitmask,
4245 };
4246 struct nfs4_pathconf_res res = {
4247 .pathconf = pathconf,
4248 };
4249 struct rpc_message msg = {
4250 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4251 .rpc_argp = &args,
4252 .rpc_resp = &res,
4253 };
4254
4255 /* None of the pathconf attributes are mandatory to implement */
4256 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4257 memset(pathconf, 0, sizeof(*pathconf));
4258 return 0;
4259 }
4260
4261 nfs_fattr_init(pathconf->fattr);
4262 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4263 }
4264
4265 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4266 struct nfs_pathconf *pathconf)
4267 {
4268 struct nfs4_exception exception = { };
4269 int err;
4270
4271 do {
4272 err = nfs4_handle_exception(server,
4273 _nfs4_proc_pathconf(server, fhandle, pathconf),
4274 &exception);
4275 } while (exception.retry);
4276 return err;
4277 }
4278
4279 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4280 const struct nfs_open_context *ctx,
4281 const struct nfs_lock_context *l_ctx,
4282 fmode_t fmode)
4283 {
4284 const struct nfs_lockowner *lockowner = NULL;
4285
4286 if (l_ctx != NULL)
4287 lockowner = &l_ctx->lockowner;
4288 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4289 }
4290 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4291
4292 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4293 const struct nfs_open_context *ctx,
4294 const struct nfs_lock_context *l_ctx,
4295 fmode_t fmode)
4296 {
4297 nfs4_stateid current_stateid;
4298
4299 /* If the current stateid represents a lost lock, then exit */
4300 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4301 return true;
4302 return nfs4_stateid_match(stateid, &current_stateid);
4303 }
4304
4305 static bool nfs4_error_stateid_expired(int err)
4306 {
4307 switch (err) {
4308 case -NFS4ERR_DELEG_REVOKED:
4309 case -NFS4ERR_ADMIN_REVOKED:
4310 case -NFS4ERR_BAD_STATEID:
4311 case -NFS4ERR_STALE_STATEID:
4312 case -NFS4ERR_OLD_STATEID:
4313 case -NFS4ERR_OPENMODE:
4314 case -NFS4ERR_EXPIRED:
4315 return true;
4316 }
4317 return false;
4318 }
4319
4320 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4321 {
4322 nfs_invalidate_atime(hdr->inode);
4323 }
4324
4325 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4326 {
4327 struct nfs_server *server = NFS_SERVER(hdr->inode);
4328
4329 trace_nfs4_read(hdr, task->tk_status);
4330 if (nfs4_async_handle_error(task, server,
4331 hdr->args.context->state,
4332 NULL) == -EAGAIN) {
4333 rpc_restart_call_prepare(task);
4334 return -EAGAIN;
4335 }
4336
4337 __nfs4_read_done_cb(hdr);
4338 if (task->tk_status > 0)
4339 renew_lease(server, hdr->timestamp);
4340 return 0;
4341 }
4342
4343 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4344 struct nfs_pgio_args *args)
4345 {
4346
4347 if (!nfs4_error_stateid_expired(task->tk_status) ||
4348 nfs4_stateid_is_current(&args->stateid,
4349 args->context,
4350 args->lock_context,
4351 FMODE_READ))
4352 return false;
4353 rpc_restart_call_prepare(task);
4354 return true;
4355 }
4356
4357 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4358 {
4359
4360 dprintk("--> %s\n", __func__);
4361
4362 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4363 return -EAGAIN;
4364 if (nfs4_read_stateid_changed(task, &hdr->args))
4365 return -EAGAIN;
4366 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4367 nfs4_read_done_cb(task, hdr);
4368 }
4369
4370 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4371 struct rpc_message *msg)
4372 {
4373 hdr->timestamp = jiffies;
4374 hdr->pgio_done_cb = nfs4_read_done_cb;
4375 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4376 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4377 }
4378
4379 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4380 struct nfs_pgio_header *hdr)
4381 {
4382 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4383 &hdr->args.seq_args,
4384 &hdr->res.seq_res,
4385 task))
4386 return 0;
4387 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4388 hdr->args.lock_context,
4389 hdr->rw_ops->rw_mode) == -EIO)
4390 return -EIO;
4391 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4392 return -EIO;
4393 return 0;
4394 }
4395
4396 static int nfs4_write_done_cb(struct rpc_task *task,
4397 struct nfs_pgio_header *hdr)
4398 {
4399 struct inode *inode = hdr->inode;
4400
4401 trace_nfs4_write(hdr, task->tk_status);
4402 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4403 hdr->args.context->state,
4404 NULL) == -EAGAIN) {
4405 rpc_restart_call_prepare(task);
4406 return -EAGAIN;
4407 }
4408 if (task->tk_status >= 0) {
4409 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4410 nfs_writeback_update_inode(hdr);
4411 }
4412 return 0;
4413 }
4414
4415 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4416 struct nfs_pgio_args *args)
4417 {
4418
4419 if (!nfs4_error_stateid_expired(task->tk_status) ||
4420 nfs4_stateid_is_current(&args->stateid,
4421 args->context,
4422 args->lock_context,
4423 FMODE_WRITE))
4424 return false;
4425 rpc_restart_call_prepare(task);
4426 return true;
4427 }
4428
4429 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4430 {
4431 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4432 return -EAGAIN;
4433 if (nfs4_write_stateid_changed(task, &hdr->args))
4434 return -EAGAIN;
4435 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4436 nfs4_write_done_cb(task, hdr);
4437 }
4438
4439 static
4440 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4441 {
4442 /* Don't request attributes for pNFS or O_DIRECT writes */
4443 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4444 return false;
4445 /* Otherwise, request attributes if and only if we don't hold
4446 * a delegation
4447 */
4448 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4449 }
4450
4451 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4452 struct rpc_message *msg)
4453 {
4454 struct nfs_server *server = NFS_SERVER(hdr->inode);
4455
4456 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4457 hdr->args.bitmask = NULL;
4458 hdr->res.fattr = NULL;
4459 } else
4460 hdr->args.bitmask = server->cache_consistency_bitmask;
4461
4462 if (!hdr->pgio_done_cb)
4463 hdr->pgio_done_cb = nfs4_write_done_cb;
4464 hdr->res.server = server;
4465 hdr->timestamp = jiffies;
4466
4467 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4468 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4469 }
4470
4471 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4472 {
4473 nfs4_setup_sequence(NFS_SERVER(data->inode),
4474 &data->args.seq_args,
4475 &data->res.seq_res,
4476 task);
4477 }
4478
4479 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4480 {
4481 struct inode *inode = data->inode;
4482
4483 trace_nfs4_commit(data, task->tk_status);
4484 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4485 NULL, NULL) == -EAGAIN) {
4486 rpc_restart_call_prepare(task);
4487 return -EAGAIN;
4488 }
4489 return 0;
4490 }
4491
4492 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4493 {
4494 if (!nfs4_sequence_done(task, &data->res.seq_res))
4495 return -EAGAIN;
4496 return data->commit_done_cb(task, data);
4497 }
4498
4499 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4500 {
4501 struct nfs_server *server = NFS_SERVER(data->inode);
4502
4503 if (data->commit_done_cb == NULL)
4504 data->commit_done_cb = nfs4_commit_done_cb;
4505 data->res.server = server;
4506 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4507 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4508 }
4509
4510 struct nfs4_renewdata {
4511 struct nfs_client *client;
4512 unsigned long timestamp;
4513 };
4514
4515 /*
4516 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4517 * standalone procedure for queueing an asynchronous RENEW.
4518 */
4519 static void nfs4_renew_release(void *calldata)
4520 {
4521 struct nfs4_renewdata *data = calldata;
4522 struct nfs_client *clp = data->client;
4523
4524 if (atomic_read(&clp->cl_count) > 1)
4525 nfs4_schedule_state_renewal(clp);
4526 nfs_put_client(clp);
4527 kfree(data);
4528 }
4529
4530 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4531 {
4532 struct nfs4_renewdata *data = calldata;
4533 struct nfs_client *clp = data->client;
4534 unsigned long timestamp = data->timestamp;
4535
4536 trace_nfs4_renew_async(clp, task->tk_status);
4537 switch (task->tk_status) {
4538 case 0:
4539 break;
4540 case -NFS4ERR_LEASE_MOVED:
4541 nfs4_schedule_lease_moved_recovery(clp);
4542 break;
4543 default:
4544 /* Unless we're shutting down, schedule state recovery! */
4545 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4546 return;
4547 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4548 nfs4_schedule_lease_recovery(clp);
4549 return;
4550 }
4551 nfs4_schedule_path_down_recovery(clp);
4552 }
4553 do_renew_lease(clp, timestamp);
4554 }
4555
4556 static const struct rpc_call_ops nfs4_renew_ops = {
4557 .rpc_call_done = nfs4_renew_done,
4558 .rpc_release = nfs4_renew_release,
4559 };
4560
4561 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4562 {
4563 struct rpc_message msg = {
4564 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4565 .rpc_argp = clp,
4566 .rpc_cred = cred,
4567 };
4568 struct nfs4_renewdata *data;
4569
4570 if (renew_flags == 0)
4571 return 0;
4572 if (!atomic_inc_not_zero(&clp->cl_count))
4573 return -EIO;
4574 data = kmalloc(sizeof(*data), GFP_NOFS);
4575 if (data == NULL)
4576 return -ENOMEM;
4577 data->client = clp;
4578 data->timestamp = jiffies;
4579 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4580 &nfs4_renew_ops, data);
4581 }
4582
4583 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4584 {
4585 struct rpc_message msg = {
4586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4587 .rpc_argp = clp,
4588 .rpc_cred = cred,
4589 };
4590 unsigned long now = jiffies;
4591 int status;
4592
4593 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4594 if (status < 0)
4595 return status;
4596 do_renew_lease(clp, now);
4597 return 0;
4598 }
4599
4600 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4601 {
4602 return server->caps & NFS_CAP_ACLS;
4603 }
4604
4605 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4606 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4607 * the stack.
4608 */
4609 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
4610
4611 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4612 struct page **pages)
4613 {
4614 struct page *newpage, **spages;
4615 int rc = 0;
4616 size_t len;
4617 spages = pages;
4618
4619 do {
4620 len = min_t(size_t, PAGE_SIZE, buflen);
4621 newpage = alloc_page(GFP_KERNEL);
4622
4623 if (newpage == NULL)
4624 goto unwind;
4625 memcpy(page_address(newpage), buf, len);
4626 buf += len;
4627 buflen -= len;
4628 *pages++ = newpage;
4629 rc++;
4630 } while (buflen != 0);
4631
4632 return rc;
4633
4634 unwind:
4635 for(; rc > 0; rc--)
4636 __free_page(spages[rc-1]);
4637 return -ENOMEM;
4638 }
4639
4640 struct nfs4_cached_acl {
4641 int cached;
4642 size_t len;
4643 char data[0];
4644 };
4645
4646 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4647 {
4648 struct nfs_inode *nfsi = NFS_I(inode);
4649
4650 spin_lock(&inode->i_lock);
4651 kfree(nfsi->nfs4_acl);
4652 nfsi->nfs4_acl = acl;
4653 spin_unlock(&inode->i_lock);
4654 }
4655
4656 static void nfs4_zap_acl_attr(struct inode *inode)
4657 {
4658 nfs4_set_cached_acl(inode, NULL);
4659 }
4660
4661 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4662 {
4663 struct nfs_inode *nfsi = NFS_I(inode);
4664 struct nfs4_cached_acl *acl;
4665 int ret = -ENOENT;
4666
4667 spin_lock(&inode->i_lock);
4668 acl = nfsi->nfs4_acl;
4669 if (acl == NULL)
4670 goto out;
4671 if (buf == NULL) /* user is just asking for length */
4672 goto out_len;
4673 if (acl->cached == 0)
4674 goto out;
4675 ret = -ERANGE; /* see getxattr(2) man page */
4676 if (acl->len > buflen)
4677 goto out;
4678 memcpy(buf, acl->data, acl->len);
4679 out_len:
4680 ret = acl->len;
4681 out:
4682 spin_unlock(&inode->i_lock);
4683 return ret;
4684 }
4685
4686 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4687 {
4688 struct nfs4_cached_acl *acl;
4689 size_t buflen = sizeof(*acl) + acl_len;
4690
4691 if (buflen <= PAGE_SIZE) {
4692 acl = kmalloc(buflen, GFP_KERNEL);
4693 if (acl == NULL)
4694 goto out;
4695 acl->cached = 1;
4696 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4697 } else {
4698 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4699 if (acl == NULL)
4700 goto out;
4701 acl->cached = 0;
4702 }
4703 acl->len = acl_len;
4704 out:
4705 nfs4_set_cached_acl(inode, acl);
4706 }
4707
4708 /*
4709 * The getxattr API returns the required buffer length when called with a
4710 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4711 * the required buf. On a NULL buf, we send a page of data to the server
4712 * guessing that the ACL request can be serviced by a page. If so, we cache
4713 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
4714 * the cache. If not so, we throw away the page, and cache the required
4715 * length. The next getxattr call will then produce another round trip to
4716 * the server, this time with the input buf of the required size.
4717 */
4718 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4719 {
4720 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4721 struct nfs_getaclargs args = {
4722 .fh = NFS_FH(inode),
4723 .acl_pages = pages,
4724 .acl_len = buflen,
4725 };
4726 struct nfs_getaclres res = {
4727 .acl_len = buflen,
4728 };
4729 struct rpc_message msg = {
4730 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4731 .rpc_argp = &args,
4732 .rpc_resp = &res,
4733 };
4734 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4735 int ret = -ENOMEM, i;
4736
4737 /* As long as we're doing a round trip to the server anyway,
4738 * let's be prepared for a page of acl data. */
4739 if (npages == 0)
4740 npages = 1;
4741 if (npages > ARRAY_SIZE(pages))
4742 return -ERANGE;
4743
4744 for (i = 0; i < npages; i++) {
4745 pages[i] = alloc_page(GFP_KERNEL);
4746 if (!pages[i])
4747 goto out_free;
4748 }
4749
4750 /* for decoding across pages */
4751 res.acl_scratch = alloc_page(GFP_KERNEL);
4752 if (!res.acl_scratch)
4753 goto out_free;
4754
4755 args.acl_len = npages * PAGE_SIZE;
4756
4757 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4758 __func__, buf, buflen, npages, args.acl_len);
4759 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4760 &msg, &args.seq_args, &res.seq_res, 0);
4761 if (ret)
4762 goto out_free;
4763
4764 /* Handle the case where the passed-in buffer is too short */
4765 if (res.acl_flags & NFS4_ACL_TRUNC) {
4766 /* Did the user only issue a request for the acl length? */
4767 if (buf == NULL)
4768 goto out_ok;
4769 ret = -ERANGE;
4770 goto out_free;
4771 }
4772 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4773 if (buf) {
4774 if (res.acl_len > buflen) {
4775 ret = -ERANGE;
4776 goto out_free;
4777 }
4778 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4779 }
4780 out_ok:
4781 ret = res.acl_len;
4782 out_free:
4783 for (i = 0; i < npages; i++)
4784 if (pages[i])
4785 __free_page(pages[i]);
4786 if (res.acl_scratch)
4787 __free_page(res.acl_scratch);
4788 return ret;
4789 }
4790
4791 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4792 {
4793 struct nfs4_exception exception = { };
4794 ssize_t ret;
4795 do {
4796 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4797 trace_nfs4_get_acl(inode, ret);
4798 if (ret >= 0)
4799 break;
4800 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4801 } while (exception.retry);
4802 return ret;
4803 }
4804
4805 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4806 {
4807 struct nfs_server *server = NFS_SERVER(inode);
4808 int ret;
4809
4810 if (!nfs4_server_supports_acls(server))
4811 return -EOPNOTSUPP;
4812 ret = nfs_revalidate_inode(server, inode);
4813 if (ret < 0)
4814 return ret;
4815 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4816 nfs_zap_acl_cache(inode);
4817 ret = nfs4_read_cached_acl(inode, buf, buflen);
4818 if (ret != -ENOENT)
4819 /* -ENOENT is returned if there is no ACL or if there is an ACL
4820 * but no cached acl data, just the acl length */
4821 return ret;
4822 return nfs4_get_acl_uncached(inode, buf, buflen);
4823 }
4824
4825 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4826 {
4827 struct nfs_server *server = NFS_SERVER(inode);
4828 struct page *pages[NFS4ACL_MAXPAGES];
4829 struct nfs_setaclargs arg = {
4830 .fh = NFS_FH(inode),
4831 .acl_pages = pages,
4832 .acl_len = buflen,
4833 };
4834 struct nfs_setaclres res;
4835 struct rpc_message msg = {
4836 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4837 .rpc_argp = &arg,
4838 .rpc_resp = &res,
4839 };
4840 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4841 int ret, i;
4842
4843 if (!nfs4_server_supports_acls(server))
4844 return -EOPNOTSUPP;
4845 if (npages > ARRAY_SIZE(pages))
4846 return -ERANGE;
4847 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
4848 if (i < 0)
4849 return i;
4850 nfs4_inode_return_delegation(inode);
4851 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4852
4853 /*
4854 * Free each page after tx, so the only ref left is
4855 * held by the network stack
4856 */
4857 for (; i > 0; i--)
4858 put_page(pages[i-1]);
4859
4860 /*
4861 * Acl update can result in inode attribute update.
4862 * so mark the attribute cache invalid.
4863 */
4864 spin_lock(&inode->i_lock);
4865 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4866 spin_unlock(&inode->i_lock);
4867 nfs_access_zap_cache(inode);
4868 nfs_zap_acl_cache(inode);
4869 return ret;
4870 }
4871
4872 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4873 {
4874 struct nfs4_exception exception = { };
4875 int err;
4876 do {
4877 err = __nfs4_proc_set_acl(inode, buf, buflen);
4878 trace_nfs4_set_acl(inode, err);
4879 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4880 &exception);
4881 } while (exception.retry);
4882 return err;
4883 }
4884
4885 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4886 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4887 size_t buflen)
4888 {
4889 struct nfs_server *server = NFS_SERVER(inode);
4890 struct nfs_fattr fattr;
4891 struct nfs4_label label = {0, 0, buflen, buf};
4892
4893 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4894 struct nfs4_getattr_arg arg = {
4895 .fh = NFS_FH(inode),
4896 .bitmask = bitmask,
4897 };
4898 struct nfs4_getattr_res res = {
4899 .fattr = &fattr,
4900 .label = &label,
4901 .server = server,
4902 };
4903 struct rpc_message msg = {
4904 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4905 .rpc_argp = &arg,
4906 .rpc_resp = &res,
4907 };
4908 int ret;
4909
4910 nfs_fattr_init(&fattr);
4911
4912 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4913 if (ret)
4914 return ret;
4915 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4916 return -ENOENT;
4917 if (buflen < label.len)
4918 return -ERANGE;
4919 return 0;
4920 }
4921
4922 static int nfs4_get_security_label(struct inode *inode, void *buf,
4923 size_t buflen)
4924 {
4925 struct nfs4_exception exception = { };
4926 int err;
4927
4928 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4929 return -EOPNOTSUPP;
4930
4931 do {
4932 err = _nfs4_get_security_label(inode, buf, buflen);
4933 trace_nfs4_get_security_label(inode, err);
4934 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4935 &exception);
4936 } while (exception.retry);
4937 return err;
4938 }
4939
4940 static int _nfs4_do_set_security_label(struct inode *inode,
4941 struct nfs4_label *ilabel,
4942 struct nfs_fattr *fattr,
4943 struct nfs4_label *olabel)
4944 {
4945
4946 struct iattr sattr = {0};
4947 struct nfs_server *server = NFS_SERVER(inode);
4948 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4949 struct nfs_setattrargs arg = {
4950 .fh = NFS_FH(inode),
4951 .iap = &sattr,
4952 .server = server,
4953 .bitmask = bitmask,
4954 .label = ilabel,
4955 };
4956 struct nfs_setattrres res = {
4957 .fattr = fattr,
4958 .label = olabel,
4959 .server = server,
4960 };
4961 struct rpc_message msg = {
4962 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4963 .rpc_argp = &arg,
4964 .rpc_resp = &res,
4965 };
4966 int status;
4967
4968 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4969
4970 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4971 if (status)
4972 dprintk("%s failed: %d\n", __func__, status);
4973
4974 return status;
4975 }
4976
4977 static int nfs4_do_set_security_label(struct inode *inode,
4978 struct nfs4_label *ilabel,
4979 struct nfs_fattr *fattr,
4980 struct nfs4_label *olabel)
4981 {
4982 struct nfs4_exception exception = { };
4983 int err;
4984
4985 do {
4986 err = _nfs4_do_set_security_label(inode, ilabel,
4987 fattr, olabel);
4988 trace_nfs4_set_security_label(inode, err);
4989 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4990 &exception);
4991 } while (exception.retry);
4992 return err;
4993 }
4994
4995 static int
4996 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4997 {
4998 struct nfs4_label ilabel, *olabel = NULL;
4999 struct nfs_fattr fattr;
5000 struct rpc_cred *cred;
5001 struct inode *inode = d_inode(dentry);
5002 int status;
5003
5004 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5005 return -EOPNOTSUPP;
5006
5007 nfs_fattr_init(&fattr);
5008
5009 ilabel.pi = 0;
5010 ilabel.lfs = 0;
5011 ilabel.label = (char *)buf;
5012 ilabel.len = buflen;
5013
5014 cred = rpc_lookup_cred();
5015 if (IS_ERR(cred))
5016 return PTR_ERR(cred);
5017
5018 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5019 if (IS_ERR(olabel)) {
5020 status = -PTR_ERR(olabel);
5021 goto out;
5022 }
5023
5024 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5025 if (status == 0)
5026 nfs_setsecurity(inode, &fattr, olabel);
5027
5028 nfs4_label_free(olabel);
5029 out:
5030 put_rpccred(cred);
5031 return status;
5032 }
5033 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5034
5035
5036 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5037 nfs4_verifier *bootverf)
5038 {
5039 __be32 verf[2];
5040
5041 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5042 /* An impossible timestamp guarantees this value
5043 * will never match a generated boot time. */
5044 verf[0] = 0;
5045 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
5046 } else {
5047 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5048 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5049 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5050 }
5051 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5052 }
5053
5054 static int
5055 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5056 {
5057 size_t len;
5058 char *str;
5059
5060 if (clp->cl_owner_id != NULL)
5061 return 0;
5062
5063 rcu_read_lock();
5064 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5065 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5066 1 +
5067 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5068 1;
5069 rcu_read_unlock();
5070
5071 if (len > NFS4_OPAQUE_LIMIT + 1)
5072 return -EINVAL;
5073
5074 /*
5075 * Since this string is allocated at mount time, and held until the
5076 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5077 * about a memory-reclaim deadlock.
5078 */
5079 str = kmalloc(len, GFP_KERNEL);
5080 if (!str)
5081 return -ENOMEM;
5082
5083 rcu_read_lock();
5084 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5085 clp->cl_ipaddr,
5086 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5087 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5088 rcu_read_unlock();
5089
5090 clp->cl_owner_id = str;
5091 return 0;
5092 }
5093
5094 static int
5095 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5096 {
5097 size_t len;
5098 char *str;
5099
5100 len = 10 + 10 + 1 + 10 + 1 +
5101 strlen(nfs4_client_id_uniquifier) + 1 +
5102 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5103
5104 if (len > NFS4_OPAQUE_LIMIT + 1)
5105 return -EINVAL;
5106
5107 /*
5108 * Since this string is allocated at mount time, and held until the
5109 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5110 * about a memory-reclaim deadlock.
5111 */
5112 str = kmalloc(len, GFP_KERNEL);
5113 if (!str)
5114 return -ENOMEM;
5115
5116 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5117 clp->rpc_ops->version, clp->cl_minorversion,
5118 nfs4_client_id_uniquifier,
5119 clp->cl_rpcclient->cl_nodename);
5120 clp->cl_owner_id = str;
5121 return 0;
5122 }
5123
5124 static int
5125 nfs4_init_uniform_client_string(struct nfs_client *clp)
5126 {
5127 size_t len;
5128 char *str;
5129
5130 if (clp->cl_owner_id != NULL)
5131 return 0;
5132
5133 if (nfs4_client_id_uniquifier[0] != '\0')
5134 return nfs4_init_uniquifier_client_string(clp);
5135
5136 len = 10 + 10 + 1 + 10 + 1 +
5137 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5138
5139 if (len > NFS4_OPAQUE_LIMIT + 1)
5140 return -EINVAL;
5141
5142 /*
5143 * Since this string is allocated at mount time, and held until the
5144 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5145 * about a memory-reclaim deadlock.
5146 */
5147 str = kmalloc(len, GFP_KERNEL);
5148 if (!str)
5149 return -ENOMEM;
5150
5151 scnprintf(str, len, "Linux NFSv%u.%u %s",
5152 clp->rpc_ops->version, clp->cl_minorversion,
5153 clp->cl_rpcclient->cl_nodename);
5154 clp->cl_owner_id = str;
5155 return 0;
5156 }
5157
5158 /*
5159 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5160 * services. Advertise one based on the address family of the
5161 * clientaddr.
5162 */
5163 static unsigned int
5164 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5165 {
5166 if (strchr(clp->cl_ipaddr, ':') != NULL)
5167 return scnprintf(buf, len, "tcp6");
5168 else
5169 return scnprintf(buf, len, "tcp");
5170 }
5171
5172 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5173 {
5174 struct nfs4_setclientid *sc = calldata;
5175
5176 if (task->tk_status == 0)
5177 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5178 }
5179
5180 static const struct rpc_call_ops nfs4_setclientid_ops = {
5181 .rpc_call_done = nfs4_setclientid_done,
5182 };
5183
5184 /**
5185 * nfs4_proc_setclientid - Negotiate client ID
5186 * @clp: state data structure
5187 * @program: RPC program for NFSv4 callback service
5188 * @port: IP port number for NFS4 callback service
5189 * @cred: RPC credential to use for this call
5190 * @res: where to place the result
5191 *
5192 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5193 */
5194 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5195 unsigned short port, struct rpc_cred *cred,
5196 struct nfs4_setclientid_res *res)
5197 {
5198 nfs4_verifier sc_verifier;
5199 struct nfs4_setclientid setclientid = {
5200 .sc_verifier = &sc_verifier,
5201 .sc_prog = program,
5202 .sc_clnt = clp,
5203 };
5204 struct rpc_message msg = {
5205 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5206 .rpc_argp = &setclientid,
5207 .rpc_resp = res,
5208 .rpc_cred = cred,
5209 };
5210 struct rpc_task *task;
5211 struct rpc_task_setup task_setup_data = {
5212 .rpc_client = clp->cl_rpcclient,
5213 .rpc_message = &msg,
5214 .callback_ops = &nfs4_setclientid_ops,
5215 .callback_data = &setclientid,
5216 .flags = RPC_TASK_TIMEOUT,
5217 };
5218 int status;
5219
5220 /* nfs_client_id4 */
5221 nfs4_init_boot_verifier(clp, &sc_verifier);
5222
5223 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5224 status = nfs4_init_uniform_client_string(clp);
5225 else
5226 status = nfs4_init_nonuniform_client_string(clp);
5227
5228 if (status)
5229 goto out;
5230
5231 /* cb_client4 */
5232 setclientid.sc_netid_len =
5233 nfs4_init_callback_netid(clp,
5234 setclientid.sc_netid,
5235 sizeof(setclientid.sc_netid));
5236 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5237 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5238 clp->cl_ipaddr, port >> 8, port & 255);
5239
5240 dprintk("NFS call setclientid auth=%s, '%s'\n",
5241 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5242 clp->cl_owner_id);
5243 task = rpc_run_task(&task_setup_data);
5244 if (IS_ERR(task)) {
5245 status = PTR_ERR(task);
5246 goto out;
5247 }
5248 status = task->tk_status;
5249 if (setclientid.sc_cred) {
5250 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5251 put_rpccred(setclientid.sc_cred);
5252 }
5253 rpc_put_task(task);
5254 out:
5255 trace_nfs4_setclientid(clp, status);
5256 dprintk("NFS reply setclientid: %d\n", status);
5257 return status;
5258 }
5259
5260 /**
5261 * nfs4_proc_setclientid_confirm - Confirm client ID
5262 * @clp: state data structure
5263 * @res: result of a previous SETCLIENTID
5264 * @cred: RPC credential to use for this call
5265 *
5266 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5267 */
5268 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5269 struct nfs4_setclientid_res *arg,
5270 struct rpc_cred *cred)
5271 {
5272 struct rpc_message msg = {
5273 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5274 .rpc_argp = arg,
5275 .rpc_cred = cred,
5276 };
5277 int status;
5278
5279 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5280 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5281 clp->cl_clientid);
5282 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5283 trace_nfs4_setclientid_confirm(clp, status);
5284 dprintk("NFS reply setclientid_confirm: %d\n", status);
5285 return status;
5286 }
5287
5288 struct nfs4_delegreturndata {
5289 struct nfs4_delegreturnargs args;
5290 struct nfs4_delegreturnres res;
5291 struct nfs_fh fh;
5292 nfs4_stateid stateid;
5293 unsigned long timestamp;
5294 struct nfs_fattr fattr;
5295 int rpc_status;
5296 struct inode *inode;
5297 bool roc;
5298 u32 roc_barrier;
5299 };
5300
5301 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5302 {
5303 struct nfs4_delegreturndata *data = calldata;
5304
5305 if (!nfs4_sequence_done(task, &data->res.seq_res))
5306 return;
5307
5308 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5309 switch (task->tk_status) {
5310 case 0:
5311 renew_lease(data->res.server, data->timestamp);
5312 case -NFS4ERR_ADMIN_REVOKED:
5313 case -NFS4ERR_DELEG_REVOKED:
5314 case -NFS4ERR_BAD_STATEID:
5315 case -NFS4ERR_OLD_STATEID:
5316 case -NFS4ERR_STALE_STATEID:
5317 case -NFS4ERR_EXPIRED:
5318 task->tk_status = 0;
5319 if (data->roc)
5320 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5321 break;
5322 default:
5323 if (nfs4_async_handle_error(task, data->res.server,
5324 NULL, NULL) == -EAGAIN) {
5325 rpc_restart_call_prepare(task);
5326 return;
5327 }
5328 }
5329 data->rpc_status = task->tk_status;
5330 }
5331
5332 static void nfs4_delegreturn_release(void *calldata)
5333 {
5334 struct nfs4_delegreturndata *data = calldata;
5335 struct inode *inode = data->inode;
5336
5337 if (inode) {
5338 if (data->roc)
5339 pnfs_roc_release(inode);
5340 nfs_iput_and_deactive(inode);
5341 }
5342 kfree(calldata);
5343 }
5344
5345 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5346 {
5347 struct nfs4_delegreturndata *d_data;
5348
5349 d_data = (struct nfs4_delegreturndata *)data;
5350
5351 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5352 return;
5353
5354 if (d_data->roc)
5355 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5356
5357 nfs4_setup_sequence(d_data->res.server,
5358 &d_data->args.seq_args,
5359 &d_data->res.seq_res,
5360 task);
5361 }
5362
5363 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5364 .rpc_call_prepare = nfs4_delegreturn_prepare,
5365 .rpc_call_done = nfs4_delegreturn_done,
5366 .rpc_release = nfs4_delegreturn_release,
5367 };
5368
5369 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5370 {
5371 struct nfs4_delegreturndata *data;
5372 struct nfs_server *server = NFS_SERVER(inode);
5373 struct rpc_task *task;
5374 struct rpc_message msg = {
5375 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5376 .rpc_cred = cred,
5377 };
5378 struct rpc_task_setup task_setup_data = {
5379 .rpc_client = server->client,
5380 .rpc_message = &msg,
5381 .callback_ops = &nfs4_delegreturn_ops,
5382 .flags = RPC_TASK_ASYNC,
5383 };
5384 int status = 0;
5385
5386 data = kzalloc(sizeof(*data), GFP_NOFS);
5387 if (data == NULL)
5388 return -ENOMEM;
5389 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5390
5391 nfs4_state_protect(server->nfs_client,
5392 NFS_SP4_MACH_CRED_CLEANUP,
5393 &task_setup_data.rpc_client, &msg);
5394
5395 data->args.fhandle = &data->fh;
5396 data->args.stateid = &data->stateid;
5397 data->args.bitmask = server->cache_consistency_bitmask;
5398 nfs_copy_fh(&data->fh, NFS_FH(inode));
5399 nfs4_stateid_copy(&data->stateid, stateid);
5400 data->res.fattr = &data->fattr;
5401 data->res.server = server;
5402 nfs_fattr_init(data->res.fattr);
5403 data->timestamp = jiffies;
5404 data->rpc_status = 0;
5405 data->inode = nfs_igrab_and_active(inode);
5406 if (data->inode)
5407 data->roc = nfs4_roc(inode);
5408
5409 task_setup_data.callback_data = data;
5410 msg.rpc_argp = &data->args;
5411 msg.rpc_resp = &data->res;
5412 task = rpc_run_task(&task_setup_data);
5413 if (IS_ERR(task))
5414 return PTR_ERR(task);
5415 if (!issync)
5416 goto out;
5417 status = nfs4_wait_for_completion_rpc_task(task);
5418 if (status != 0)
5419 goto out;
5420 status = data->rpc_status;
5421 if (status == 0)
5422 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5423 else
5424 nfs_refresh_inode(inode, &data->fattr);
5425 out:
5426 rpc_put_task(task);
5427 return status;
5428 }
5429
5430 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5431 {
5432 struct nfs_server *server = NFS_SERVER(inode);
5433 struct nfs4_exception exception = { };
5434 int err;
5435 do {
5436 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5437 trace_nfs4_delegreturn(inode, stateid, err);
5438 switch (err) {
5439 case -NFS4ERR_STALE_STATEID:
5440 case -NFS4ERR_EXPIRED:
5441 case 0:
5442 return 0;
5443 }
5444 err = nfs4_handle_exception(server, err, &exception);
5445 } while (exception.retry);
5446 return err;
5447 }
5448
5449 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5450 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5451
5452 /*
5453 * sleep, with exponential backoff, and retry the LOCK operation.
5454 */
5455 static unsigned long
5456 nfs4_set_lock_task_retry(unsigned long timeout)
5457 {
5458 freezable_schedule_timeout_killable_unsafe(timeout);
5459 timeout <<= 1;
5460 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5461 return NFS4_LOCK_MAXTIMEOUT;
5462 return timeout;
5463 }
5464
5465 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5466 {
5467 struct inode *inode = state->inode;
5468 struct nfs_server *server = NFS_SERVER(inode);
5469 struct nfs_client *clp = server->nfs_client;
5470 struct nfs_lockt_args arg = {
5471 .fh = NFS_FH(inode),
5472 .fl = request,
5473 };
5474 struct nfs_lockt_res res = {
5475 .denied = request,
5476 };
5477 struct rpc_message msg = {
5478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5479 .rpc_argp = &arg,
5480 .rpc_resp = &res,
5481 .rpc_cred = state->owner->so_cred,
5482 };
5483 struct nfs4_lock_state *lsp;
5484 int status;
5485
5486 arg.lock_owner.clientid = clp->cl_clientid;
5487 status = nfs4_set_lock_state(state, request);
5488 if (status != 0)
5489 goto out;
5490 lsp = request->fl_u.nfs4_fl.owner;
5491 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5492 arg.lock_owner.s_dev = server->s_dev;
5493 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5494 switch (status) {
5495 case 0:
5496 request->fl_type = F_UNLCK;
5497 break;
5498 case -NFS4ERR_DENIED:
5499 status = 0;
5500 }
5501 request->fl_ops->fl_release_private(request);
5502 request->fl_ops = NULL;
5503 out:
5504 return status;
5505 }
5506
5507 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5508 {
5509 struct nfs4_exception exception = { };
5510 int err;
5511
5512 do {
5513 err = _nfs4_proc_getlk(state, cmd, request);
5514 trace_nfs4_get_lock(request, state, cmd, err);
5515 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5516 &exception);
5517 } while (exception.retry);
5518 return err;
5519 }
5520
5521 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5522 {
5523 return locks_lock_inode_wait(inode, fl);
5524 }
5525
5526 struct nfs4_unlockdata {
5527 struct nfs_locku_args arg;
5528 struct nfs_locku_res res;
5529 struct nfs4_lock_state *lsp;
5530 struct nfs_open_context *ctx;
5531 struct file_lock fl;
5532 struct nfs_server *server;
5533 unsigned long timestamp;
5534 };
5535
5536 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5537 struct nfs_open_context *ctx,
5538 struct nfs4_lock_state *lsp,
5539 struct nfs_seqid *seqid)
5540 {
5541 struct nfs4_unlockdata *p;
5542 struct inode *inode = lsp->ls_state->inode;
5543
5544 p = kzalloc(sizeof(*p), GFP_NOFS);
5545 if (p == NULL)
5546 return NULL;
5547 p->arg.fh = NFS_FH(inode);
5548 p->arg.fl = &p->fl;
5549 p->arg.seqid = seqid;
5550 p->res.seqid = seqid;
5551 p->lsp = lsp;
5552 atomic_inc(&lsp->ls_count);
5553 /* Ensure we don't close file until we're done freeing locks! */
5554 p->ctx = get_nfs_open_context(ctx);
5555 memcpy(&p->fl, fl, sizeof(p->fl));
5556 p->server = NFS_SERVER(inode);
5557 return p;
5558 }
5559
5560 static void nfs4_locku_release_calldata(void *data)
5561 {
5562 struct nfs4_unlockdata *calldata = data;
5563 nfs_free_seqid(calldata->arg.seqid);
5564 nfs4_put_lock_state(calldata->lsp);
5565 put_nfs_open_context(calldata->ctx);
5566 kfree(calldata);
5567 }
5568
5569 static void nfs4_locku_done(struct rpc_task *task, void *data)
5570 {
5571 struct nfs4_unlockdata *calldata = data;
5572
5573 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5574 return;
5575 switch (task->tk_status) {
5576 case 0:
5577 renew_lease(calldata->server, calldata->timestamp);
5578 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5579 if (nfs4_update_lock_stateid(calldata->lsp,
5580 &calldata->res.stateid))
5581 break;
5582 case -NFS4ERR_BAD_STATEID:
5583 case -NFS4ERR_OLD_STATEID:
5584 case -NFS4ERR_STALE_STATEID:
5585 case -NFS4ERR_EXPIRED:
5586 if (!nfs4_stateid_match(&calldata->arg.stateid,
5587 &calldata->lsp->ls_stateid))
5588 rpc_restart_call_prepare(task);
5589 break;
5590 default:
5591 if (nfs4_async_handle_error(task, calldata->server,
5592 NULL, NULL) == -EAGAIN)
5593 rpc_restart_call_prepare(task);
5594 }
5595 nfs_release_seqid(calldata->arg.seqid);
5596 }
5597
5598 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5599 {
5600 struct nfs4_unlockdata *calldata = data;
5601
5602 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5603 goto out_wait;
5604 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5605 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5606 /* Note: exit _without_ running nfs4_locku_done */
5607 goto out_no_action;
5608 }
5609 calldata->timestamp = jiffies;
5610 if (nfs4_setup_sequence(calldata->server,
5611 &calldata->arg.seq_args,
5612 &calldata->res.seq_res,
5613 task) != 0)
5614 nfs_release_seqid(calldata->arg.seqid);
5615 return;
5616 out_no_action:
5617 task->tk_action = NULL;
5618 out_wait:
5619 nfs4_sequence_done(task, &calldata->res.seq_res);
5620 }
5621
5622 static const struct rpc_call_ops nfs4_locku_ops = {
5623 .rpc_call_prepare = nfs4_locku_prepare,
5624 .rpc_call_done = nfs4_locku_done,
5625 .rpc_release = nfs4_locku_release_calldata,
5626 };
5627
5628 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5629 struct nfs_open_context *ctx,
5630 struct nfs4_lock_state *lsp,
5631 struct nfs_seqid *seqid)
5632 {
5633 struct nfs4_unlockdata *data;
5634 struct rpc_message msg = {
5635 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5636 .rpc_cred = ctx->cred,
5637 };
5638 struct rpc_task_setup task_setup_data = {
5639 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5640 .rpc_message = &msg,
5641 .callback_ops = &nfs4_locku_ops,
5642 .workqueue = nfsiod_workqueue,
5643 .flags = RPC_TASK_ASYNC,
5644 };
5645
5646 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5647 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5648
5649 /* Ensure this is an unlock - when canceling a lock, the
5650 * canceled lock is passed in, and it won't be an unlock.
5651 */
5652 fl->fl_type = F_UNLCK;
5653
5654 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5655 if (data == NULL) {
5656 nfs_free_seqid(seqid);
5657 return ERR_PTR(-ENOMEM);
5658 }
5659
5660 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5661 msg.rpc_argp = &data->arg;
5662 msg.rpc_resp = &data->res;
5663 task_setup_data.callback_data = data;
5664 return rpc_run_task(&task_setup_data);
5665 }
5666
5667 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5668 {
5669 struct inode *inode = state->inode;
5670 struct nfs4_state_owner *sp = state->owner;
5671 struct nfs_inode *nfsi = NFS_I(inode);
5672 struct nfs_seqid *seqid;
5673 struct nfs4_lock_state *lsp;
5674 struct rpc_task *task;
5675 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5676 int status = 0;
5677 unsigned char fl_flags = request->fl_flags;
5678
5679 status = nfs4_set_lock_state(state, request);
5680 /* Unlock _before_ we do the RPC call */
5681 request->fl_flags |= FL_EXISTS;
5682 /* Exclude nfs_delegation_claim_locks() */
5683 mutex_lock(&sp->so_delegreturn_mutex);
5684 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5685 down_read(&nfsi->rwsem);
5686 if (do_vfs_lock(inode, request) == -ENOENT) {
5687 up_read(&nfsi->rwsem);
5688 mutex_unlock(&sp->so_delegreturn_mutex);
5689 goto out;
5690 }
5691 up_read(&nfsi->rwsem);
5692 mutex_unlock(&sp->so_delegreturn_mutex);
5693 if (status != 0)
5694 goto out;
5695 /* Is this a delegated lock? */
5696 lsp = request->fl_u.nfs4_fl.owner;
5697 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5698 goto out;
5699 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5700 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5701 status = -ENOMEM;
5702 if (IS_ERR(seqid))
5703 goto out;
5704 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5705 status = PTR_ERR(task);
5706 if (IS_ERR(task))
5707 goto out;
5708 status = nfs4_wait_for_completion_rpc_task(task);
5709 rpc_put_task(task);
5710 out:
5711 request->fl_flags = fl_flags;
5712 trace_nfs4_unlock(request, state, F_SETLK, status);
5713 return status;
5714 }
5715
5716 struct nfs4_lockdata {
5717 struct nfs_lock_args arg;
5718 struct nfs_lock_res res;
5719 struct nfs4_lock_state *lsp;
5720 struct nfs_open_context *ctx;
5721 struct file_lock fl;
5722 unsigned long timestamp;
5723 int rpc_status;
5724 int cancelled;
5725 struct nfs_server *server;
5726 };
5727
5728 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5729 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5730 gfp_t gfp_mask)
5731 {
5732 struct nfs4_lockdata *p;
5733 struct inode *inode = lsp->ls_state->inode;
5734 struct nfs_server *server = NFS_SERVER(inode);
5735 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5736
5737 p = kzalloc(sizeof(*p), gfp_mask);
5738 if (p == NULL)
5739 return NULL;
5740
5741 p->arg.fh = NFS_FH(inode);
5742 p->arg.fl = &p->fl;
5743 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5744 if (IS_ERR(p->arg.open_seqid))
5745 goto out_free;
5746 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5747 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5748 if (IS_ERR(p->arg.lock_seqid))
5749 goto out_free_seqid;
5750 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5751 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5752 p->arg.lock_owner.s_dev = server->s_dev;
5753 p->res.lock_seqid = p->arg.lock_seqid;
5754 p->lsp = lsp;
5755 p->server = server;
5756 atomic_inc(&lsp->ls_count);
5757 p->ctx = get_nfs_open_context(ctx);
5758 get_file(fl->fl_file);
5759 memcpy(&p->fl, fl, sizeof(p->fl));
5760 return p;
5761 out_free_seqid:
5762 nfs_free_seqid(p->arg.open_seqid);
5763 out_free:
5764 kfree(p);
5765 return NULL;
5766 }
5767
5768 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5769 {
5770 struct nfs4_lockdata *data = calldata;
5771 struct nfs4_state *state = data->lsp->ls_state;
5772
5773 dprintk("%s: begin!\n", __func__);
5774 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5775 goto out_wait;
5776 /* Do we need to do an open_to_lock_owner? */
5777 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5778 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5779 goto out_release_lock_seqid;
5780 }
5781 nfs4_stateid_copy(&data->arg.open_stateid,
5782 &state->open_stateid);
5783 data->arg.new_lock_owner = 1;
5784 data->res.open_seqid = data->arg.open_seqid;
5785 } else {
5786 data->arg.new_lock_owner = 0;
5787 nfs4_stateid_copy(&data->arg.lock_stateid,
5788 &data->lsp->ls_stateid);
5789 }
5790 if (!nfs4_valid_open_stateid(state)) {
5791 data->rpc_status = -EBADF;
5792 task->tk_action = NULL;
5793 goto out_release_open_seqid;
5794 }
5795 data->timestamp = jiffies;
5796 if (nfs4_setup_sequence(data->server,
5797 &data->arg.seq_args,
5798 &data->res.seq_res,
5799 task) == 0)
5800 return;
5801 out_release_open_seqid:
5802 nfs_release_seqid(data->arg.open_seqid);
5803 out_release_lock_seqid:
5804 nfs_release_seqid(data->arg.lock_seqid);
5805 out_wait:
5806 nfs4_sequence_done(task, &data->res.seq_res);
5807 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5808 }
5809
5810 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5811 {
5812 struct nfs4_lockdata *data = calldata;
5813 struct nfs4_lock_state *lsp = data->lsp;
5814
5815 dprintk("%s: begin!\n", __func__);
5816
5817 if (!nfs4_sequence_done(task, &data->res.seq_res))
5818 return;
5819
5820 data->rpc_status = task->tk_status;
5821 switch (task->tk_status) {
5822 case 0:
5823 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5824 data->timestamp);
5825 if (data->arg.new_lock) {
5826 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5827 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5828 rpc_restart_call_prepare(task);
5829 break;
5830 }
5831 }
5832 if (data->arg.new_lock_owner != 0) {
5833 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5834 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5835 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5836 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5837 rpc_restart_call_prepare(task);
5838 break;
5839 case -NFS4ERR_BAD_STATEID:
5840 case -NFS4ERR_OLD_STATEID:
5841 case -NFS4ERR_STALE_STATEID:
5842 case -NFS4ERR_EXPIRED:
5843 if (data->arg.new_lock_owner != 0) {
5844 if (!nfs4_stateid_match(&data->arg.open_stateid,
5845 &lsp->ls_state->open_stateid))
5846 rpc_restart_call_prepare(task);
5847 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5848 &lsp->ls_stateid))
5849 rpc_restart_call_prepare(task);
5850 }
5851 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5852 }
5853
5854 static void nfs4_lock_release(void *calldata)
5855 {
5856 struct nfs4_lockdata *data = calldata;
5857
5858 dprintk("%s: begin!\n", __func__);
5859 nfs_free_seqid(data->arg.open_seqid);
5860 if (data->cancelled != 0) {
5861 struct rpc_task *task;
5862 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5863 data->arg.lock_seqid);
5864 if (!IS_ERR(task))
5865 rpc_put_task_async(task);
5866 dprintk("%s: cancelling lock!\n", __func__);
5867 } else
5868 nfs_free_seqid(data->arg.lock_seqid);
5869 nfs4_put_lock_state(data->lsp);
5870 put_nfs_open_context(data->ctx);
5871 fput(data->fl.fl_file);
5872 kfree(data);
5873 dprintk("%s: done!\n", __func__);
5874 }
5875
5876 static const struct rpc_call_ops nfs4_lock_ops = {
5877 .rpc_call_prepare = nfs4_lock_prepare,
5878 .rpc_call_done = nfs4_lock_done,
5879 .rpc_release = nfs4_lock_release,
5880 };
5881
5882 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5883 {
5884 switch (error) {
5885 case -NFS4ERR_ADMIN_REVOKED:
5886 case -NFS4ERR_BAD_STATEID:
5887 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5888 if (new_lock_owner != 0 ||
5889 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5890 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5891 break;
5892 case -NFS4ERR_STALE_STATEID:
5893 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5894 case -NFS4ERR_EXPIRED:
5895 nfs4_schedule_lease_recovery(server->nfs_client);
5896 };
5897 }
5898
5899 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5900 {
5901 struct nfs4_lockdata *data;
5902 struct rpc_task *task;
5903 struct rpc_message msg = {
5904 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5905 .rpc_cred = state->owner->so_cred,
5906 };
5907 struct rpc_task_setup task_setup_data = {
5908 .rpc_client = NFS_CLIENT(state->inode),
5909 .rpc_message = &msg,
5910 .callback_ops = &nfs4_lock_ops,
5911 .workqueue = nfsiod_workqueue,
5912 .flags = RPC_TASK_ASYNC,
5913 };
5914 int ret;
5915
5916 dprintk("%s: begin!\n", __func__);
5917 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5918 fl->fl_u.nfs4_fl.owner,
5919 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5920 if (data == NULL)
5921 return -ENOMEM;
5922 if (IS_SETLKW(cmd))
5923 data->arg.block = 1;
5924 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5925 msg.rpc_argp = &data->arg;
5926 msg.rpc_resp = &data->res;
5927 task_setup_data.callback_data = data;
5928 if (recovery_type > NFS_LOCK_NEW) {
5929 if (recovery_type == NFS_LOCK_RECLAIM)
5930 data->arg.reclaim = NFS_LOCK_RECLAIM;
5931 nfs4_set_sequence_privileged(&data->arg.seq_args);
5932 } else
5933 data->arg.new_lock = 1;
5934 task = rpc_run_task(&task_setup_data);
5935 if (IS_ERR(task))
5936 return PTR_ERR(task);
5937 ret = nfs4_wait_for_completion_rpc_task(task);
5938 if (ret == 0) {
5939 ret = data->rpc_status;
5940 if (ret)
5941 nfs4_handle_setlk_error(data->server, data->lsp,
5942 data->arg.new_lock_owner, ret);
5943 } else
5944 data->cancelled = 1;
5945 rpc_put_task(task);
5946 dprintk("%s: done, ret = %d!\n", __func__, ret);
5947 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
5948 return ret;
5949 }
5950
5951 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5952 {
5953 struct nfs_server *server = NFS_SERVER(state->inode);
5954 struct nfs4_exception exception = {
5955 .inode = state->inode,
5956 };
5957 int err;
5958
5959 do {
5960 /* Cache the lock if possible... */
5961 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5962 return 0;
5963 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5964 if (err != -NFS4ERR_DELAY)
5965 break;
5966 nfs4_handle_exception(server, err, &exception);
5967 } while (exception.retry);
5968 return err;
5969 }
5970
5971 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5972 {
5973 struct nfs_server *server = NFS_SERVER(state->inode);
5974 struct nfs4_exception exception = {
5975 .inode = state->inode,
5976 };
5977 int err;
5978
5979 err = nfs4_set_lock_state(state, request);
5980 if (err != 0)
5981 return err;
5982 if (!recover_lost_locks) {
5983 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5984 return 0;
5985 }
5986 do {
5987 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5988 return 0;
5989 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5990 switch (err) {
5991 default:
5992 goto out;
5993 case -NFS4ERR_GRACE:
5994 case -NFS4ERR_DELAY:
5995 nfs4_handle_exception(server, err, &exception);
5996 err = 0;
5997 }
5998 } while (exception.retry);
5999 out:
6000 return err;
6001 }
6002
6003 #if defined(CONFIG_NFS_V4_1)
6004 /**
6005 * nfs41_check_expired_locks - possibly free a lock stateid
6006 *
6007 * @state: NFSv4 state for an inode
6008 *
6009 * Returns NFS_OK if recovery for this stateid is now finished.
6010 * Otherwise a negative NFS4ERR value is returned.
6011 */
6012 static int nfs41_check_expired_locks(struct nfs4_state *state)
6013 {
6014 int status, ret = -NFS4ERR_BAD_STATEID;
6015 struct nfs4_lock_state *lsp;
6016 struct nfs_server *server = NFS_SERVER(state->inode);
6017
6018 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
6019 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
6020 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
6021
6022 status = nfs41_test_stateid(server,
6023 &lsp->ls_stateid,
6024 cred);
6025 trace_nfs4_test_lock_stateid(state, lsp, status);
6026 if (status != NFS_OK) {
6027 /* Free the stateid unless the server
6028 * informs us the stateid is unrecognized. */
6029 if (status != -NFS4ERR_BAD_STATEID)
6030 nfs41_free_stateid(server,
6031 &lsp->ls_stateid,
6032 cred);
6033 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6034 ret = status;
6035 }
6036 }
6037 };
6038
6039 return ret;
6040 }
6041
6042 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6043 {
6044 int status = NFS_OK;
6045
6046 if (test_bit(LK_STATE_IN_USE, &state->flags))
6047 status = nfs41_check_expired_locks(state);
6048 if (status != NFS_OK)
6049 status = nfs4_lock_expired(state, request);
6050 return status;
6051 }
6052 #endif
6053
6054 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6055 {
6056 struct nfs_inode *nfsi = NFS_I(state->inode);
6057 unsigned char fl_flags = request->fl_flags;
6058 int status = -ENOLCK;
6059
6060 if ((fl_flags & FL_POSIX) &&
6061 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6062 goto out;
6063 /* Is this a delegated open? */
6064 status = nfs4_set_lock_state(state, request);
6065 if (status != 0)
6066 goto out;
6067 request->fl_flags |= FL_ACCESS;
6068 status = do_vfs_lock(state->inode, request);
6069 if (status < 0)
6070 goto out;
6071 down_read(&nfsi->rwsem);
6072 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6073 /* Yes: cache locks! */
6074 /* ...but avoid races with delegation recall... */
6075 request->fl_flags = fl_flags & ~FL_SLEEP;
6076 status = do_vfs_lock(state->inode, request);
6077 up_read(&nfsi->rwsem);
6078 goto out;
6079 }
6080 up_read(&nfsi->rwsem);
6081 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6082 out:
6083 request->fl_flags = fl_flags;
6084 return status;
6085 }
6086
6087 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6088 {
6089 struct nfs4_exception exception = {
6090 .state = state,
6091 .inode = state->inode,
6092 };
6093 int err;
6094
6095 do {
6096 err = _nfs4_proc_setlk(state, cmd, request);
6097 if (err == -NFS4ERR_DENIED)
6098 err = -EAGAIN;
6099 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6100 err, &exception);
6101 } while (exception.retry);
6102 return err;
6103 }
6104
6105 static int
6106 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6107 {
6108 struct nfs_open_context *ctx;
6109 struct nfs4_state *state;
6110 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6111 int status;
6112
6113 /* verify open state */
6114 ctx = nfs_file_open_context(filp);
6115 state = ctx->state;
6116
6117 if (request->fl_start < 0 || request->fl_end < 0)
6118 return -EINVAL;
6119
6120 if (IS_GETLK(cmd)) {
6121 if (state != NULL)
6122 return nfs4_proc_getlk(state, F_GETLK, request);
6123 return 0;
6124 }
6125
6126 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6127 return -EINVAL;
6128
6129 if (request->fl_type == F_UNLCK) {
6130 if (state != NULL)
6131 return nfs4_proc_unlck(state, cmd, request);
6132 return 0;
6133 }
6134
6135 if (state == NULL)
6136 return -ENOLCK;
6137 /*
6138 * Don't rely on the VFS having checked the file open mode,
6139 * since it won't do this for flock() locks.
6140 */
6141 switch (request->fl_type) {
6142 case F_RDLCK:
6143 if (!(filp->f_mode & FMODE_READ))
6144 return -EBADF;
6145 break;
6146 case F_WRLCK:
6147 if (!(filp->f_mode & FMODE_WRITE))
6148 return -EBADF;
6149 }
6150
6151 do {
6152 status = nfs4_proc_setlk(state, cmd, request);
6153 if ((status != -EAGAIN) || IS_SETLK(cmd))
6154 break;
6155 timeout = nfs4_set_lock_task_retry(timeout);
6156 status = -ERESTARTSYS;
6157 if (signalled())
6158 break;
6159 } while(status < 0);
6160 return status;
6161 }
6162
6163 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6164 {
6165 struct nfs_server *server = NFS_SERVER(state->inode);
6166 int err;
6167
6168 err = nfs4_set_lock_state(state, fl);
6169 if (err != 0)
6170 return err;
6171 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6172 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6173 }
6174
6175 struct nfs_release_lockowner_data {
6176 struct nfs4_lock_state *lsp;
6177 struct nfs_server *server;
6178 struct nfs_release_lockowner_args args;
6179 struct nfs_release_lockowner_res res;
6180 unsigned long timestamp;
6181 };
6182
6183 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6184 {
6185 struct nfs_release_lockowner_data *data = calldata;
6186 struct nfs_server *server = data->server;
6187 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6188 &data->args.seq_args, &data->res.seq_res, task);
6189 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6190 data->timestamp = jiffies;
6191 }
6192
6193 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6194 {
6195 struct nfs_release_lockowner_data *data = calldata;
6196 struct nfs_server *server = data->server;
6197
6198 nfs40_sequence_done(task, &data->res.seq_res);
6199
6200 switch (task->tk_status) {
6201 case 0:
6202 renew_lease(server, data->timestamp);
6203 break;
6204 case -NFS4ERR_STALE_CLIENTID:
6205 case -NFS4ERR_EXPIRED:
6206 nfs4_schedule_lease_recovery(server->nfs_client);
6207 break;
6208 case -NFS4ERR_LEASE_MOVED:
6209 case -NFS4ERR_DELAY:
6210 if (nfs4_async_handle_error(task, server,
6211 NULL, NULL) == -EAGAIN)
6212 rpc_restart_call_prepare(task);
6213 }
6214 }
6215
6216 static void nfs4_release_lockowner_release(void *calldata)
6217 {
6218 struct nfs_release_lockowner_data *data = calldata;
6219 nfs4_free_lock_state(data->server, data->lsp);
6220 kfree(calldata);
6221 }
6222
6223 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6224 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6225 .rpc_call_done = nfs4_release_lockowner_done,
6226 .rpc_release = nfs4_release_lockowner_release,
6227 };
6228
6229 static void
6230 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6231 {
6232 struct nfs_release_lockowner_data *data;
6233 struct rpc_message msg = {
6234 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6235 };
6236
6237 if (server->nfs_client->cl_mvops->minor_version != 0)
6238 return;
6239
6240 data = kmalloc(sizeof(*data), GFP_NOFS);
6241 if (!data)
6242 return;
6243 data->lsp = lsp;
6244 data->server = server;
6245 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6246 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6247 data->args.lock_owner.s_dev = server->s_dev;
6248
6249 msg.rpc_argp = &data->args;
6250 msg.rpc_resp = &data->res;
6251 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6252 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6253 }
6254
6255 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6256
6257 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6258 struct dentry *dentry, const char *key,
6259 const void *buf, size_t buflen,
6260 int flags)
6261 {
6262 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6263 }
6264
6265 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6266 struct dentry *unused, struct inode *inode,
6267 const char *key, void *buf, size_t buflen)
6268 {
6269 return nfs4_proc_get_acl(inode, buf, buflen);
6270 }
6271
6272 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
6273 {
6274 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
6275 }
6276
6277 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6278
6279 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6280 struct dentry *dentry, const char *key,
6281 const void *buf, size_t buflen,
6282 int flags)
6283 {
6284 if (security_ismaclabel(key))
6285 return nfs4_set_security_label(dentry, buf, buflen);
6286
6287 return -EOPNOTSUPP;
6288 }
6289
6290 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6291 struct dentry *unused, struct inode *inode,
6292 const char *key, void *buf, size_t buflen)
6293 {
6294 if (security_ismaclabel(key))
6295 return nfs4_get_security_label(inode, buf, buflen);
6296 return -EOPNOTSUPP;
6297 }
6298
6299 static ssize_t
6300 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6301 {
6302 int len = 0;
6303
6304 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
6305 len = security_inode_listsecurity(inode, list, list_len);
6306 if (list_len && len > list_len)
6307 return -ERANGE;
6308 }
6309 return len;
6310 }
6311
6312 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6313 .prefix = XATTR_SECURITY_PREFIX,
6314 .get = nfs4_xattr_get_nfs4_label,
6315 .set = nfs4_xattr_set_nfs4_label,
6316 };
6317
6318 #else
6319
6320 static ssize_t
6321 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6322 {
6323 return 0;
6324 }
6325
6326 #endif
6327
6328 /*
6329 * nfs_fhget will use either the mounted_on_fileid or the fileid
6330 */
6331 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6332 {
6333 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6334 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6335 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6336 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6337 return;
6338
6339 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6340 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6341 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6342 fattr->nlink = 2;
6343 }
6344
6345 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6346 const struct qstr *name,
6347 struct nfs4_fs_locations *fs_locations,
6348 struct page *page)
6349 {
6350 struct nfs_server *server = NFS_SERVER(dir);
6351 u32 bitmask[3] = {
6352 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6353 };
6354 struct nfs4_fs_locations_arg args = {
6355 .dir_fh = NFS_FH(dir),
6356 .name = name,
6357 .page = page,
6358 .bitmask = bitmask,
6359 };
6360 struct nfs4_fs_locations_res res = {
6361 .fs_locations = fs_locations,
6362 };
6363 struct rpc_message msg = {
6364 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6365 .rpc_argp = &args,
6366 .rpc_resp = &res,
6367 };
6368 int status;
6369
6370 dprintk("%s: start\n", __func__);
6371
6372 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6373 * is not supported */
6374 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6375 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6376 else
6377 bitmask[0] |= FATTR4_WORD0_FILEID;
6378
6379 nfs_fattr_init(&fs_locations->fattr);
6380 fs_locations->server = server;
6381 fs_locations->nlocations = 0;
6382 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6383 dprintk("%s: returned status = %d\n", __func__, status);
6384 return status;
6385 }
6386
6387 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6388 const struct qstr *name,
6389 struct nfs4_fs_locations *fs_locations,
6390 struct page *page)
6391 {
6392 struct nfs4_exception exception = { };
6393 int err;
6394 do {
6395 err = _nfs4_proc_fs_locations(client, dir, name,
6396 fs_locations, page);
6397 trace_nfs4_get_fs_locations(dir, name, err);
6398 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6399 &exception);
6400 } while (exception.retry);
6401 return err;
6402 }
6403
6404 /*
6405 * This operation also signals the server that this client is
6406 * performing migration recovery. The server can stop returning
6407 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6408 * appended to this compound to identify the client ID which is
6409 * performing recovery.
6410 */
6411 static int _nfs40_proc_get_locations(struct inode *inode,
6412 struct nfs4_fs_locations *locations,
6413 struct page *page, struct rpc_cred *cred)
6414 {
6415 struct nfs_server *server = NFS_SERVER(inode);
6416 struct rpc_clnt *clnt = server->client;
6417 u32 bitmask[2] = {
6418 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6419 };
6420 struct nfs4_fs_locations_arg args = {
6421 .clientid = server->nfs_client->cl_clientid,
6422 .fh = NFS_FH(inode),
6423 .page = page,
6424 .bitmask = bitmask,
6425 .migration = 1, /* skip LOOKUP */
6426 .renew = 1, /* append RENEW */
6427 };
6428 struct nfs4_fs_locations_res res = {
6429 .fs_locations = locations,
6430 .migration = 1,
6431 .renew = 1,
6432 };
6433 struct rpc_message msg = {
6434 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6435 .rpc_argp = &args,
6436 .rpc_resp = &res,
6437 .rpc_cred = cred,
6438 };
6439 unsigned long now = jiffies;
6440 int status;
6441
6442 nfs_fattr_init(&locations->fattr);
6443 locations->server = server;
6444 locations->nlocations = 0;
6445
6446 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6447 nfs4_set_sequence_privileged(&args.seq_args);
6448 status = nfs4_call_sync_sequence(clnt, server, &msg,
6449 &args.seq_args, &res.seq_res);
6450 if (status)
6451 return status;
6452
6453 renew_lease(server, now);
6454 return 0;
6455 }
6456
6457 #ifdef CONFIG_NFS_V4_1
6458
6459 /*
6460 * This operation also signals the server that this client is
6461 * performing migration recovery. The server can stop asserting
6462 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6463 * performing this operation is identified in the SEQUENCE
6464 * operation in this compound.
6465 *
6466 * When the client supports GETATTR(fs_locations_info), it can
6467 * be plumbed in here.
6468 */
6469 static int _nfs41_proc_get_locations(struct inode *inode,
6470 struct nfs4_fs_locations *locations,
6471 struct page *page, struct rpc_cred *cred)
6472 {
6473 struct nfs_server *server = NFS_SERVER(inode);
6474 struct rpc_clnt *clnt = server->client;
6475 u32 bitmask[2] = {
6476 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6477 };
6478 struct nfs4_fs_locations_arg args = {
6479 .fh = NFS_FH(inode),
6480 .page = page,
6481 .bitmask = bitmask,
6482 .migration = 1, /* skip LOOKUP */
6483 };
6484 struct nfs4_fs_locations_res res = {
6485 .fs_locations = locations,
6486 .migration = 1,
6487 };
6488 struct rpc_message msg = {
6489 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6490 .rpc_argp = &args,
6491 .rpc_resp = &res,
6492 .rpc_cred = cred,
6493 };
6494 int status;
6495
6496 nfs_fattr_init(&locations->fattr);
6497 locations->server = server;
6498 locations->nlocations = 0;
6499
6500 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6501 nfs4_set_sequence_privileged(&args.seq_args);
6502 status = nfs4_call_sync_sequence(clnt, server, &msg,
6503 &args.seq_args, &res.seq_res);
6504 if (status == NFS4_OK &&
6505 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6506 status = -NFS4ERR_LEASE_MOVED;
6507 return status;
6508 }
6509
6510 #endif /* CONFIG_NFS_V4_1 */
6511
6512 /**
6513 * nfs4_proc_get_locations - discover locations for a migrated FSID
6514 * @inode: inode on FSID that is migrating
6515 * @locations: result of query
6516 * @page: buffer
6517 * @cred: credential to use for this operation
6518 *
6519 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6520 * operation failed, or a negative errno if a local error occurred.
6521 *
6522 * On success, "locations" is filled in, but if the server has
6523 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6524 * asserted.
6525 *
6526 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6527 * from this client that require migration recovery.
6528 */
6529 int nfs4_proc_get_locations(struct inode *inode,
6530 struct nfs4_fs_locations *locations,
6531 struct page *page, struct rpc_cred *cred)
6532 {
6533 struct nfs_server *server = NFS_SERVER(inode);
6534 struct nfs_client *clp = server->nfs_client;
6535 const struct nfs4_mig_recovery_ops *ops =
6536 clp->cl_mvops->mig_recovery_ops;
6537 struct nfs4_exception exception = { };
6538 int status;
6539
6540 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6541 (unsigned long long)server->fsid.major,
6542 (unsigned long long)server->fsid.minor,
6543 clp->cl_hostname);
6544 nfs_display_fhandle(NFS_FH(inode), __func__);
6545
6546 do {
6547 status = ops->get_locations(inode, locations, page, cred);
6548 if (status != -NFS4ERR_DELAY)
6549 break;
6550 nfs4_handle_exception(server, status, &exception);
6551 } while (exception.retry);
6552 return status;
6553 }
6554
6555 /*
6556 * This operation also signals the server that this client is
6557 * performing "lease moved" recovery. The server can stop
6558 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6559 * is appended to this compound to identify the client ID which is
6560 * performing recovery.
6561 */
6562 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6563 {
6564 struct nfs_server *server = NFS_SERVER(inode);
6565 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6566 struct rpc_clnt *clnt = server->client;
6567 struct nfs4_fsid_present_arg args = {
6568 .fh = NFS_FH(inode),
6569 .clientid = clp->cl_clientid,
6570 .renew = 1, /* append RENEW */
6571 };
6572 struct nfs4_fsid_present_res res = {
6573 .renew = 1,
6574 };
6575 struct rpc_message msg = {
6576 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6577 .rpc_argp = &args,
6578 .rpc_resp = &res,
6579 .rpc_cred = cred,
6580 };
6581 unsigned long now = jiffies;
6582 int status;
6583
6584 res.fh = nfs_alloc_fhandle();
6585 if (res.fh == NULL)
6586 return -ENOMEM;
6587
6588 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6589 nfs4_set_sequence_privileged(&args.seq_args);
6590 status = nfs4_call_sync_sequence(clnt, server, &msg,
6591 &args.seq_args, &res.seq_res);
6592 nfs_free_fhandle(res.fh);
6593 if (status)
6594 return status;
6595
6596 do_renew_lease(clp, now);
6597 return 0;
6598 }
6599
6600 #ifdef CONFIG_NFS_V4_1
6601
6602 /*
6603 * This operation also signals the server that this client is
6604 * performing "lease moved" recovery. The server can stop asserting
6605 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6606 * this operation is identified in the SEQUENCE operation in this
6607 * compound.
6608 */
6609 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6610 {
6611 struct nfs_server *server = NFS_SERVER(inode);
6612 struct rpc_clnt *clnt = server->client;
6613 struct nfs4_fsid_present_arg args = {
6614 .fh = NFS_FH(inode),
6615 };
6616 struct nfs4_fsid_present_res res = {
6617 };
6618 struct rpc_message msg = {
6619 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6620 .rpc_argp = &args,
6621 .rpc_resp = &res,
6622 .rpc_cred = cred,
6623 };
6624 int status;
6625
6626 res.fh = nfs_alloc_fhandle();
6627 if (res.fh == NULL)
6628 return -ENOMEM;
6629
6630 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6631 nfs4_set_sequence_privileged(&args.seq_args);
6632 status = nfs4_call_sync_sequence(clnt, server, &msg,
6633 &args.seq_args, &res.seq_res);
6634 nfs_free_fhandle(res.fh);
6635 if (status == NFS4_OK &&
6636 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6637 status = -NFS4ERR_LEASE_MOVED;
6638 return status;
6639 }
6640
6641 #endif /* CONFIG_NFS_V4_1 */
6642
6643 /**
6644 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6645 * @inode: inode on FSID to check
6646 * @cred: credential to use for this operation
6647 *
6648 * Server indicates whether the FSID is present, moved, or not
6649 * recognized. This operation is necessary to clear a LEASE_MOVED
6650 * condition for this client ID.
6651 *
6652 * Returns NFS4_OK if the FSID is present on this server,
6653 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6654 * NFS4ERR code if some error occurred on the server, or a
6655 * negative errno if a local failure occurred.
6656 */
6657 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6658 {
6659 struct nfs_server *server = NFS_SERVER(inode);
6660 struct nfs_client *clp = server->nfs_client;
6661 const struct nfs4_mig_recovery_ops *ops =
6662 clp->cl_mvops->mig_recovery_ops;
6663 struct nfs4_exception exception = { };
6664 int status;
6665
6666 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6667 (unsigned long long)server->fsid.major,
6668 (unsigned long long)server->fsid.minor,
6669 clp->cl_hostname);
6670 nfs_display_fhandle(NFS_FH(inode), __func__);
6671
6672 do {
6673 status = ops->fsid_present(inode, cred);
6674 if (status != -NFS4ERR_DELAY)
6675 break;
6676 nfs4_handle_exception(server, status, &exception);
6677 } while (exception.retry);
6678 return status;
6679 }
6680
6681 /**
6682 * If 'use_integrity' is true and the state managment nfs_client
6683 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6684 * and the machine credential as per RFC3530bis and RFC5661 Security
6685 * Considerations sections. Otherwise, just use the user cred with the
6686 * filesystem's rpc_client.
6687 */
6688 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6689 {
6690 int status;
6691 struct nfs4_secinfo_arg args = {
6692 .dir_fh = NFS_FH(dir),
6693 .name = name,
6694 };
6695 struct nfs4_secinfo_res res = {
6696 .flavors = flavors,
6697 };
6698 struct rpc_message msg = {
6699 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6700 .rpc_argp = &args,
6701 .rpc_resp = &res,
6702 };
6703 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6704 struct rpc_cred *cred = NULL;
6705
6706 if (use_integrity) {
6707 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6708 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6709 msg.rpc_cred = cred;
6710 }
6711
6712 dprintk("NFS call secinfo %s\n", name->name);
6713
6714 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6715 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6716
6717 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6718 &res.seq_res, 0);
6719 dprintk("NFS reply secinfo: %d\n", status);
6720
6721 if (cred)
6722 put_rpccred(cred);
6723
6724 return status;
6725 }
6726
6727 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6728 struct nfs4_secinfo_flavors *flavors)
6729 {
6730 struct nfs4_exception exception = { };
6731 int err;
6732 do {
6733 err = -NFS4ERR_WRONGSEC;
6734
6735 /* try to use integrity protection with machine cred */
6736 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6737 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6738
6739 /*
6740 * if unable to use integrity protection, or SECINFO with
6741 * integrity protection returns NFS4ERR_WRONGSEC (which is
6742 * disallowed by spec, but exists in deployed servers) use
6743 * the current filesystem's rpc_client and the user cred.
6744 */
6745 if (err == -NFS4ERR_WRONGSEC)
6746 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6747
6748 trace_nfs4_secinfo(dir, name, err);
6749 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6750 &exception);
6751 } while (exception.retry);
6752 return err;
6753 }
6754
6755 #ifdef CONFIG_NFS_V4_1
6756 /*
6757 * Check the exchange flags returned by the server for invalid flags, having
6758 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
6759 * DS flags set.
6760 */
6761 static int nfs4_check_cl_exchange_flags(u32 flags)
6762 {
6763 if (flags & ~EXCHGID4_FLAG_MASK_R)
6764 goto out_inval;
6765 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6766 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6767 goto out_inval;
6768 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6769 goto out_inval;
6770 return NFS_OK;
6771 out_inval:
6772 return -NFS4ERR_INVAL;
6773 }
6774
6775 static bool
6776 nfs41_same_server_scope(struct nfs41_server_scope *a,
6777 struct nfs41_server_scope *b)
6778 {
6779 if (a->server_scope_sz == b->server_scope_sz &&
6780 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6781 return true;
6782
6783 return false;
6784 }
6785
6786 static void
6787 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
6788 {
6789 }
6790
6791 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
6792 .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
6793 };
6794
6795 /*
6796 * nfs4_proc_bind_one_conn_to_session()
6797 *
6798 * The 4.1 client currently uses the same TCP connection for the
6799 * fore and backchannel.
6800 */
6801 static
6802 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
6803 struct rpc_xprt *xprt,
6804 struct nfs_client *clp,
6805 struct rpc_cred *cred)
6806 {
6807 int status;
6808 struct nfs41_bind_conn_to_session_args args = {
6809 .client = clp,
6810 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6811 };
6812 struct nfs41_bind_conn_to_session_res res;
6813 struct rpc_message msg = {
6814 .rpc_proc =
6815 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6816 .rpc_argp = &args,
6817 .rpc_resp = &res,
6818 .rpc_cred = cred,
6819 };
6820 struct rpc_task_setup task_setup_data = {
6821 .rpc_client = clnt,
6822 .rpc_xprt = xprt,
6823 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
6824 .rpc_message = &msg,
6825 .flags = RPC_TASK_TIMEOUT,
6826 };
6827 struct rpc_task *task;
6828
6829 dprintk("--> %s\n", __func__);
6830
6831 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6832 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6833 args.dir = NFS4_CDFC4_FORE;
6834
6835 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
6836 if (xprt != rcu_access_pointer(clnt->cl_xprt))
6837 args.dir = NFS4_CDFC4_FORE;
6838
6839 task = rpc_run_task(&task_setup_data);
6840 if (!IS_ERR(task)) {
6841 status = task->tk_status;
6842 rpc_put_task(task);
6843 } else
6844 status = PTR_ERR(task);
6845 trace_nfs4_bind_conn_to_session(clp, status);
6846 if (status == 0) {
6847 if (memcmp(res.sessionid.data,
6848 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6849 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6850 status = -EIO;
6851 goto out;
6852 }
6853 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6854 dprintk("NFS: %s: Unexpected direction from server\n",
6855 __func__);
6856 status = -EIO;
6857 goto out;
6858 }
6859 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6860 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6861 __func__);
6862 status = -EIO;
6863 goto out;
6864 }
6865 }
6866 out:
6867 dprintk("<-- %s status= %d\n", __func__, status);
6868 return status;
6869 }
6870
6871 struct rpc_bind_conn_calldata {
6872 struct nfs_client *clp;
6873 struct rpc_cred *cred;
6874 };
6875
6876 static int
6877 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
6878 struct rpc_xprt *xprt,
6879 void *calldata)
6880 {
6881 struct rpc_bind_conn_calldata *p = calldata;
6882
6883 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
6884 }
6885
6886 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6887 {
6888 struct rpc_bind_conn_calldata data = {
6889 .clp = clp,
6890 .cred = cred,
6891 };
6892 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
6893 nfs4_proc_bind_conn_to_session_callback, &data);
6894 }
6895
6896 /*
6897 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
6898 * and operations we'd like to see to enable certain features in the allow map
6899 */
6900 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6901 .how = SP4_MACH_CRED,
6902 .enforce.u.words = {
6903 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6904 1 << (OP_EXCHANGE_ID - 32) |
6905 1 << (OP_CREATE_SESSION - 32) |
6906 1 << (OP_DESTROY_SESSION - 32) |
6907 1 << (OP_DESTROY_CLIENTID - 32)
6908 },
6909 .allow.u.words = {
6910 [0] = 1 << (OP_CLOSE) |
6911 1 << (OP_OPEN_DOWNGRADE) |
6912 1 << (OP_LOCKU) |
6913 1 << (OP_DELEGRETURN) |
6914 1 << (OP_COMMIT),
6915 [1] = 1 << (OP_SECINFO - 32) |
6916 1 << (OP_SECINFO_NO_NAME - 32) |
6917 1 << (OP_LAYOUTRETURN - 32) |
6918 1 << (OP_TEST_STATEID - 32) |
6919 1 << (OP_FREE_STATEID - 32) |
6920 1 << (OP_WRITE - 32)
6921 }
6922 };
6923
6924 /*
6925 * Select the state protection mode for client `clp' given the server results
6926 * from exchange_id in `sp'.
6927 *
6928 * Returns 0 on success, negative errno otherwise.
6929 */
6930 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6931 struct nfs41_state_protection *sp)
6932 {
6933 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6934 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6935 1 << (OP_EXCHANGE_ID - 32) |
6936 1 << (OP_CREATE_SESSION - 32) |
6937 1 << (OP_DESTROY_SESSION - 32) |
6938 1 << (OP_DESTROY_CLIENTID - 32)
6939 };
6940 unsigned int i;
6941
6942 if (sp->how == SP4_MACH_CRED) {
6943 /* Print state protect result */
6944 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6945 for (i = 0; i <= LAST_NFS4_OP; i++) {
6946 if (test_bit(i, sp->enforce.u.longs))
6947 dfprintk(MOUNT, " enforce op %d\n", i);
6948 if (test_bit(i, sp->allow.u.longs))
6949 dfprintk(MOUNT, " allow op %d\n", i);
6950 }
6951
6952 /* make sure nothing is on enforce list that isn't supported */
6953 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6954 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6955 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6956 return -EINVAL;
6957 }
6958 }
6959
6960 /*
6961 * Minimal mode - state operations are allowed to use machine
6962 * credential. Note this already happens by default, so the
6963 * client doesn't have to do anything more than the negotiation.
6964 *
6965 * NOTE: we don't care if EXCHANGE_ID is in the list -
6966 * we're already using the machine cred for exchange_id
6967 * and will never use a different cred.
6968 */
6969 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6970 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6971 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6972 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6973 dfprintk(MOUNT, "sp4_mach_cred:\n");
6974 dfprintk(MOUNT, " minimal mode enabled\n");
6975 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6976 } else {
6977 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6978 return -EINVAL;
6979 }
6980
6981 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6982 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
6983 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
6984 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6985 dfprintk(MOUNT, " cleanup mode enabled\n");
6986 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6987 }
6988
6989 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
6990 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
6991 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP,
6992 &clp->cl_sp4_flags);
6993 }
6994
6995 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6996 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6997 dfprintk(MOUNT, " secinfo mode enabled\n");
6998 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6999 }
7000
7001 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
7002 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
7003 dfprintk(MOUNT, " stateid mode enabled\n");
7004 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
7005 }
7006
7007 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
7008 dfprintk(MOUNT, " write mode enabled\n");
7009 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
7010 }
7011
7012 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
7013 dfprintk(MOUNT, " commit mode enabled\n");
7014 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
7015 }
7016 }
7017
7018 return 0;
7019 }
7020
7021 /*
7022 * _nfs4_proc_exchange_id()
7023 *
7024 * Wrapper for EXCHANGE_ID operation.
7025 */
7026 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7027 u32 sp4_how)
7028 {
7029 nfs4_verifier verifier;
7030 struct nfs41_exchange_id_args args = {
7031 .verifier = &verifier,
7032 .client = clp,
7033 #ifdef CONFIG_NFS_V4_1_MIGRATION
7034 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7035 EXCHGID4_FLAG_BIND_PRINC_STATEID |
7036 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
7037 #else
7038 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7039 EXCHGID4_FLAG_BIND_PRINC_STATEID,
7040 #endif
7041 };
7042 struct nfs41_exchange_id_res res = {
7043 0
7044 };
7045 int status;
7046 struct rpc_message msg = {
7047 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
7048 .rpc_argp = &args,
7049 .rpc_resp = &res,
7050 .rpc_cred = cred,
7051 };
7052
7053 nfs4_init_boot_verifier(clp, &verifier);
7054
7055 status = nfs4_init_uniform_client_string(clp);
7056 if (status)
7057 goto out;
7058
7059 dprintk("NFS call exchange_id auth=%s, '%s'\n",
7060 clp->cl_rpcclient->cl_auth->au_ops->au_name,
7061 clp->cl_owner_id);
7062
7063 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7064 GFP_NOFS);
7065 if (unlikely(res.server_owner == NULL)) {
7066 status = -ENOMEM;
7067 goto out;
7068 }
7069
7070 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7071 GFP_NOFS);
7072 if (unlikely(res.server_scope == NULL)) {
7073 status = -ENOMEM;
7074 goto out_server_owner;
7075 }
7076
7077 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7078 if (unlikely(res.impl_id == NULL)) {
7079 status = -ENOMEM;
7080 goto out_server_scope;
7081 }
7082
7083 switch (sp4_how) {
7084 case SP4_NONE:
7085 args.state_protect.how = SP4_NONE;
7086 break;
7087
7088 case SP4_MACH_CRED:
7089 args.state_protect = nfs4_sp4_mach_cred_request;
7090 break;
7091
7092 default:
7093 /* unsupported! */
7094 WARN_ON_ONCE(1);
7095 status = -EINVAL;
7096 goto out_impl_id;
7097 }
7098
7099 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7100 trace_nfs4_exchange_id(clp, status);
7101 if (status == 0)
7102 status = nfs4_check_cl_exchange_flags(res.flags);
7103
7104 if (status == 0)
7105 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7106
7107 if (status == 0) {
7108 clp->cl_clientid = res.clientid;
7109 clp->cl_exchange_flags = res.flags;
7110 /* Client ID is not confirmed */
7111 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7112 clear_bit(NFS4_SESSION_ESTABLISHED,
7113 &clp->cl_session->session_state);
7114 clp->cl_seqid = res.seqid;
7115 }
7116
7117 kfree(clp->cl_serverowner);
7118 clp->cl_serverowner = res.server_owner;
7119 res.server_owner = NULL;
7120
7121 /* use the most recent implementation id */
7122 kfree(clp->cl_implid);
7123 clp->cl_implid = res.impl_id;
7124 res.impl_id = NULL;
7125
7126 if (clp->cl_serverscope != NULL &&
7127 !nfs41_same_server_scope(clp->cl_serverscope,
7128 res.server_scope)) {
7129 dprintk("%s: server_scope mismatch detected\n",
7130 __func__);
7131 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7132 kfree(clp->cl_serverscope);
7133 clp->cl_serverscope = NULL;
7134 }
7135
7136 if (clp->cl_serverscope == NULL) {
7137 clp->cl_serverscope = res.server_scope;
7138 res.server_scope = NULL;
7139 }
7140 }
7141
7142 out_impl_id:
7143 kfree(res.impl_id);
7144 out_server_scope:
7145 kfree(res.server_scope);
7146 out_server_owner:
7147 kfree(res.server_owner);
7148 out:
7149 if (clp->cl_implid != NULL)
7150 dprintk("NFS reply exchange_id: Server Implementation ID: "
7151 "domain: %s, name: %s, date: %llu,%u\n",
7152 clp->cl_implid->domain, clp->cl_implid->name,
7153 clp->cl_implid->date.seconds,
7154 clp->cl_implid->date.nseconds);
7155 dprintk("NFS reply exchange_id: %d\n", status);
7156 return status;
7157 }
7158
7159 /*
7160 * nfs4_proc_exchange_id()
7161 *
7162 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7163 *
7164 * Since the clientid has expired, all compounds using sessions
7165 * associated with the stale clientid will be returning
7166 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7167 * be in some phase of session reset.
7168 *
7169 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7170 */
7171 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7172 {
7173 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7174 int status;
7175
7176 /* try SP4_MACH_CRED if krb5i/p */
7177 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7178 authflavor == RPC_AUTH_GSS_KRB5P) {
7179 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7180 if (!status)
7181 return 0;
7182 }
7183
7184 /* try SP4_NONE */
7185 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7186 }
7187
7188 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7189 struct rpc_cred *cred)
7190 {
7191 struct rpc_message msg = {
7192 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7193 .rpc_argp = clp,
7194 .rpc_cred = cred,
7195 };
7196 int status;
7197
7198 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7199 trace_nfs4_destroy_clientid(clp, status);
7200 if (status)
7201 dprintk("NFS: Got error %d from the server %s on "
7202 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7203 return status;
7204 }
7205
7206 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7207 struct rpc_cred *cred)
7208 {
7209 unsigned int loop;
7210 int ret;
7211
7212 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7213 ret = _nfs4_proc_destroy_clientid(clp, cred);
7214 switch (ret) {
7215 case -NFS4ERR_DELAY:
7216 case -NFS4ERR_CLIENTID_BUSY:
7217 ssleep(1);
7218 break;
7219 default:
7220 return ret;
7221 }
7222 }
7223 return 0;
7224 }
7225
7226 int nfs4_destroy_clientid(struct nfs_client *clp)
7227 {
7228 struct rpc_cred *cred;
7229 int ret = 0;
7230
7231 if (clp->cl_mvops->minor_version < 1)
7232 goto out;
7233 if (clp->cl_exchange_flags == 0)
7234 goto out;
7235 if (clp->cl_preserve_clid)
7236 goto out;
7237 cred = nfs4_get_clid_cred(clp);
7238 ret = nfs4_proc_destroy_clientid(clp, cred);
7239 if (cred)
7240 put_rpccred(cred);
7241 switch (ret) {
7242 case 0:
7243 case -NFS4ERR_STALE_CLIENTID:
7244 clp->cl_exchange_flags = 0;
7245 }
7246 out:
7247 return ret;
7248 }
7249
7250 struct nfs4_get_lease_time_data {
7251 struct nfs4_get_lease_time_args *args;
7252 struct nfs4_get_lease_time_res *res;
7253 struct nfs_client *clp;
7254 };
7255
7256 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7257 void *calldata)
7258 {
7259 struct nfs4_get_lease_time_data *data =
7260 (struct nfs4_get_lease_time_data *)calldata;
7261
7262 dprintk("--> %s\n", __func__);
7263 /* just setup sequence, do not trigger session recovery
7264 since we're invoked within one */
7265 nfs41_setup_sequence(data->clp->cl_session,
7266 &data->args->la_seq_args,
7267 &data->res->lr_seq_res,
7268 task);
7269 dprintk("<-- %s\n", __func__);
7270 }
7271
7272 /*
7273 * Called from nfs4_state_manager thread for session setup, so don't recover
7274 * from sequence operation or clientid errors.
7275 */
7276 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7277 {
7278 struct nfs4_get_lease_time_data *data =
7279 (struct nfs4_get_lease_time_data *)calldata;
7280
7281 dprintk("--> %s\n", __func__);
7282 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7283 return;
7284 switch (task->tk_status) {
7285 case -NFS4ERR_DELAY:
7286 case -NFS4ERR_GRACE:
7287 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7288 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7289 task->tk_status = 0;
7290 /* fall through */
7291 case -NFS4ERR_RETRY_UNCACHED_REP:
7292 rpc_restart_call_prepare(task);
7293 return;
7294 }
7295 dprintk("<-- %s\n", __func__);
7296 }
7297
7298 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7299 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7300 .rpc_call_done = nfs4_get_lease_time_done,
7301 };
7302
7303 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7304 {
7305 struct rpc_task *task;
7306 struct nfs4_get_lease_time_args args;
7307 struct nfs4_get_lease_time_res res = {
7308 .lr_fsinfo = fsinfo,
7309 };
7310 struct nfs4_get_lease_time_data data = {
7311 .args = &args,
7312 .res = &res,
7313 .clp = clp,
7314 };
7315 struct rpc_message msg = {
7316 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7317 .rpc_argp = &args,
7318 .rpc_resp = &res,
7319 };
7320 struct rpc_task_setup task_setup = {
7321 .rpc_client = clp->cl_rpcclient,
7322 .rpc_message = &msg,
7323 .callback_ops = &nfs4_get_lease_time_ops,
7324 .callback_data = &data,
7325 .flags = RPC_TASK_TIMEOUT,
7326 };
7327 int status;
7328
7329 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7330 nfs4_set_sequence_privileged(&args.la_seq_args);
7331 dprintk("--> %s\n", __func__);
7332 task = rpc_run_task(&task_setup);
7333
7334 if (IS_ERR(task))
7335 status = PTR_ERR(task);
7336 else {
7337 status = task->tk_status;
7338 rpc_put_task(task);
7339 }
7340 dprintk("<-- %s return %d\n", __func__, status);
7341
7342 return status;
7343 }
7344
7345 /*
7346 * Initialize the values to be used by the client in CREATE_SESSION
7347 * If nfs4_init_session set the fore channel request and response sizes,
7348 * use them.
7349 *
7350 * Set the back channel max_resp_sz_cached to zero to force the client to
7351 * always set csa_cachethis to FALSE because the current implementation
7352 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7353 */
7354 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7355 {
7356 unsigned int max_rqst_sz, max_resp_sz;
7357
7358 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7359 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7360
7361 /* Fore channel attributes */
7362 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7363 args->fc_attrs.max_resp_sz = max_resp_sz;
7364 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7365 args->fc_attrs.max_reqs = max_session_slots;
7366
7367 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7368 "max_ops=%u max_reqs=%u\n",
7369 __func__,
7370 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7371 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7372
7373 /* Back channel attributes */
7374 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7375 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7376 args->bc_attrs.max_resp_sz_cached = 0;
7377 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7378 args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
7379
7380 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7381 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7382 __func__,
7383 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7384 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7385 args->bc_attrs.max_reqs);
7386 }
7387
7388 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7389 struct nfs41_create_session_res *res)
7390 {
7391 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7392 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7393
7394 if (rcvd->max_resp_sz > sent->max_resp_sz)
7395 return -EINVAL;
7396 /*
7397 * Our requested max_ops is the minimum we need; we're not
7398 * prepared to break up compounds into smaller pieces than that.
7399 * So, no point even trying to continue if the server won't
7400 * cooperate:
7401 */
7402 if (rcvd->max_ops < sent->max_ops)
7403 return -EINVAL;
7404 if (rcvd->max_reqs == 0)
7405 return -EINVAL;
7406 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7407 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7408 return 0;
7409 }
7410
7411 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7412 struct nfs41_create_session_res *res)
7413 {
7414 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7415 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7416
7417 if (!(res->flags & SESSION4_BACK_CHAN))
7418 goto out;
7419 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7420 return -EINVAL;
7421 if (rcvd->max_resp_sz < sent->max_resp_sz)
7422 return -EINVAL;
7423 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7424 return -EINVAL;
7425 /* These would render the backchannel useless: */
7426 if (rcvd->max_ops != sent->max_ops)
7427 return -EINVAL;
7428 if (rcvd->max_reqs != sent->max_reqs)
7429 return -EINVAL;
7430 out:
7431 return 0;
7432 }
7433
7434 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7435 struct nfs41_create_session_res *res)
7436 {
7437 int ret;
7438
7439 ret = nfs4_verify_fore_channel_attrs(args, res);
7440 if (ret)
7441 return ret;
7442 return nfs4_verify_back_channel_attrs(args, res);
7443 }
7444
7445 static void nfs4_update_session(struct nfs4_session *session,
7446 struct nfs41_create_session_res *res)
7447 {
7448 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7449 /* Mark client id and session as being confirmed */
7450 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7451 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7452 session->flags = res->flags;
7453 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7454 if (res->flags & SESSION4_BACK_CHAN)
7455 memcpy(&session->bc_attrs, &res->bc_attrs,
7456 sizeof(session->bc_attrs));
7457 }
7458
7459 static int _nfs4_proc_create_session(struct nfs_client *clp,
7460 struct rpc_cred *cred)
7461 {
7462 struct nfs4_session *session = clp->cl_session;
7463 struct nfs41_create_session_args args = {
7464 .client = clp,
7465 .clientid = clp->cl_clientid,
7466 .seqid = clp->cl_seqid,
7467 .cb_program = NFS4_CALLBACK,
7468 };
7469 struct nfs41_create_session_res res;
7470
7471 struct rpc_message msg = {
7472 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7473 .rpc_argp = &args,
7474 .rpc_resp = &res,
7475 .rpc_cred = cred,
7476 };
7477 int status;
7478
7479 nfs4_init_channel_attrs(&args);
7480 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7481
7482 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7483 trace_nfs4_create_session(clp, status);
7484
7485 if (!status) {
7486 /* Verify the session's negotiated channel_attrs values */
7487 status = nfs4_verify_channel_attrs(&args, &res);
7488 /* Increment the clientid slot sequence id */
7489 if (clp->cl_seqid == res.seqid)
7490 clp->cl_seqid++;
7491 if (status)
7492 goto out;
7493 nfs4_update_session(session, &res);
7494 }
7495 out:
7496 return status;
7497 }
7498
7499 /*
7500 * Issues a CREATE_SESSION operation to the server.
7501 * It is the responsibility of the caller to verify the session is
7502 * expired before calling this routine.
7503 */
7504 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7505 {
7506 int status;
7507 unsigned *ptr;
7508 struct nfs4_session *session = clp->cl_session;
7509
7510 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7511
7512 status = _nfs4_proc_create_session(clp, cred);
7513 if (status)
7514 goto out;
7515
7516 /* Init or reset the session slot tables */
7517 status = nfs4_setup_session_slot_tables(session);
7518 dprintk("slot table setup returned %d\n", status);
7519 if (status)
7520 goto out;
7521
7522 ptr = (unsigned *)&session->sess_id.data[0];
7523 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
7524 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7525 out:
7526 dprintk("<-- %s\n", __func__);
7527 return status;
7528 }
7529
7530 /*
7531 * Issue the over-the-wire RPC DESTROY_SESSION.
7532 * The caller must serialize access to this routine.
7533 */
7534 int nfs4_proc_destroy_session(struct nfs4_session *session,
7535 struct rpc_cred *cred)
7536 {
7537 struct rpc_message msg = {
7538 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7539 .rpc_argp = session,
7540 .rpc_cred = cred,
7541 };
7542 int status = 0;
7543
7544 dprintk("--> nfs4_proc_destroy_session\n");
7545
7546 /* session is still being setup */
7547 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7548 return 0;
7549
7550 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7551 trace_nfs4_destroy_session(session->clp, status);
7552
7553 if (status)
7554 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7555 "Session has been destroyed regardless...\n", status);
7556
7557 dprintk("<-- nfs4_proc_destroy_session\n");
7558 return status;
7559 }
7560
7561 /*
7562 * Renew the cl_session lease.
7563 */
7564 struct nfs4_sequence_data {
7565 struct nfs_client *clp;
7566 struct nfs4_sequence_args args;
7567 struct nfs4_sequence_res res;
7568 };
7569
7570 static void nfs41_sequence_release(void *data)
7571 {
7572 struct nfs4_sequence_data *calldata = data;
7573 struct nfs_client *clp = calldata->clp;
7574
7575 if (atomic_read(&clp->cl_count) > 1)
7576 nfs4_schedule_state_renewal(clp);
7577 nfs_put_client(clp);
7578 kfree(calldata);
7579 }
7580
7581 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7582 {
7583 switch(task->tk_status) {
7584 case -NFS4ERR_DELAY:
7585 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7586 return -EAGAIN;
7587 default:
7588 nfs4_schedule_lease_recovery(clp);
7589 }
7590 return 0;
7591 }
7592
7593 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7594 {
7595 struct nfs4_sequence_data *calldata = data;
7596 struct nfs_client *clp = calldata->clp;
7597
7598 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7599 return;
7600
7601 trace_nfs4_sequence(clp, task->tk_status);
7602 if (task->tk_status < 0) {
7603 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7604 if (atomic_read(&clp->cl_count) == 1)
7605 goto out;
7606
7607 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7608 rpc_restart_call_prepare(task);
7609 return;
7610 }
7611 }
7612 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7613 out:
7614 dprintk("<-- %s\n", __func__);
7615 }
7616
7617 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7618 {
7619 struct nfs4_sequence_data *calldata = data;
7620 struct nfs_client *clp = calldata->clp;
7621 struct nfs4_sequence_args *args;
7622 struct nfs4_sequence_res *res;
7623
7624 args = task->tk_msg.rpc_argp;
7625 res = task->tk_msg.rpc_resp;
7626
7627 nfs41_setup_sequence(clp->cl_session, args, res, task);
7628 }
7629
7630 static const struct rpc_call_ops nfs41_sequence_ops = {
7631 .rpc_call_done = nfs41_sequence_call_done,
7632 .rpc_call_prepare = nfs41_sequence_prepare,
7633 .rpc_release = nfs41_sequence_release,
7634 };
7635
7636 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7637 struct rpc_cred *cred,
7638 bool is_privileged)
7639 {
7640 struct nfs4_sequence_data *calldata;
7641 struct rpc_message msg = {
7642 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7643 .rpc_cred = cred,
7644 };
7645 struct rpc_task_setup task_setup_data = {
7646 .rpc_client = clp->cl_rpcclient,
7647 .rpc_message = &msg,
7648 .callback_ops = &nfs41_sequence_ops,
7649 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7650 };
7651
7652 if (!atomic_inc_not_zero(&clp->cl_count))
7653 return ERR_PTR(-EIO);
7654 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7655 if (calldata == NULL) {
7656 nfs_put_client(clp);
7657 return ERR_PTR(-ENOMEM);
7658 }
7659 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7660 if (is_privileged)
7661 nfs4_set_sequence_privileged(&calldata->args);
7662 msg.rpc_argp = &calldata->args;
7663 msg.rpc_resp = &calldata->res;
7664 calldata->clp = clp;
7665 task_setup_data.callback_data = calldata;
7666
7667 return rpc_run_task(&task_setup_data);
7668 }
7669
7670 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7671 {
7672 struct rpc_task *task;
7673 int ret = 0;
7674
7675 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7676 return -EAGAIN;
7677 task = _nfs41_proc_sequence(clp, cred, false);
7678 if (IS_ERR(task))
7679 ret = PTR_ERR(task);
7680 else
7681 rpc_put_task_async(task);
7682 dprintk("<-- %s status=%d\n", __func__, ret);
7683 return ret;
7684 }
7685
7686 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7687 {
7688 struct rpc_task *task;
7689 int ret;
7690
7691 task = _nfs41_proc_sequence(clp, cred, true);
7692 if (IS_ERR(task)) {
7693 ret = PTR_ERR(task);
7694 goto out;
7695 }
7696 ret = rpc_wait_for_completion_task(task);
7697 if (!ret)
7698 ret = task->tk_status;
7699 rpc_put_task(task);
7700 out:
7701 dprintk("<-- %s status=%d\n", __func__, ret);
7702 return ret;
7703 }
7704
7705 struct nfs4_reclaim_complete_data {
7706 struct nfs_client *clp;
7707 struct nfs41_reclaim_complete_args arg;
7708 struct nfs41_reclaim_complete_res res;
7709 };
7710
7711 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7712 {
7713 struct nfs4_reclaim_complete_data *calldata = data;
7714
7715 nfs41_setup_sequence(calldata->clp->cl_session,
7716 &calldata->arg.seq_args,
7717 &calldata->res.seq_res,
7718 task);
7719 }
7720
7721 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7722 {
7723 switch(task->tk_status) {
7724 case 0:
7725 case -NFS4ERR_COMPLETE_ALREADY:
7726 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7727 break;
7728 case -NFS4ERR_DELAY:
7729 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7730 /* fall through */
7731 case -NFS4ERR_RETRY_UNCACHED_REP:
7732 return -EAGAIN;
7733 default:
7734 nfs4_schedule_lease_recovery(clp);
7735 }
7736 return 0;
7737 }
7738
7739 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7740 {
7741 struct nfs4_reclaim_complete_data *calldata = data;
7742 struct nfs_client *clp = calldata->clp;
7743 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7744
7745 dprintk("--> %s\n", __func__);
7746 if (!nfs41_sequence_done(task, res))
7747 return;
7748
7749 trace_nfs4_reclaim_complete(clp, task->tk_status);
7750 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7751 rpc_restart_call_prepare(task);
7752 return;
7753 }
7754 dprintk("<-- %s\n", __func__);
7755 }
7756
7757 static void nfs4_free_reclaim_complete_data(void *data)
7758 {
7759 struct nfs4_reclaim_complete_data *calldata = data;
7760
7761 kfree(calldata);
7762 }
7763
7764 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7765 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7766 .rpc_call_done = nfs4_reclaim_complete_done,
7767 .rpc_release = nfs4_free_reclaim_complete_data,
7768 };
7769
7770 /*
7771 * Issue a global reclaim complete.
7772 */
7773 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7774 struct rpc_cred *cred)
7775 {
7776 struct nfs4_reclaim_complete_data *calldata;
7777 struct rpc_task *task;
7778 struct rpc_message msg = {
7779 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7780 .rpc_cred = cred,
7781 };
7782 struct rpc_task_setup task_setup_data = {
7783 .rpc_client = clp->cl_rpcclient,
7784 .rpc_message = &msg,
7785 .callback_ops = &nfs4_reclaim_complete_call_ops,
7786 .flags = RPC_TASK_ASYNC,
7787 };
7788 int status = -ENOMEM;
7789
7790 dprintk("--> %s\n", __func__);
7791 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7792 if (calldata == NULL)
7793 goto out;
7794 calldata->clp = clp;
7795 calldata->arg.one_fs = 0;
7796
7797 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7798 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7799 msg.rpc_argp = &calldata->arg;
7800 msg.rpc_resp = &calldata->res;
7801 task_setup_data.callback_data = calldata;
7802 task = rpc_run_task(&task_setup_data);
7803 if (IS_ERR(task)) {
7804 status = PTR_ERR(task);
7805 goto out;
7806 }
7807 status = nfs4_wait_for_completion_rpc_task(task);
7808 if (status == 0)
7809 status = task->tk_status;
7810 rpc_put_task(task);
7811 return 0;
7812 out:
7813 dprintk("<-- %s status=%d\n", __func__, status);
7814 return status;
7815 }
7816
7817 static void
7818 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7819 {
7820 struct nfs4_layoutget *lgp = calldata;
7821 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7822 struct nfs4_session *session = nfs4_get_session(server);
7823 int ret;
7824
7825 dprintk("--> %s\n", __func__);
7826 /* Note the is a race here, where a CB_LAYOUTRECALL can come in
7827 * right now covering the LAYOUTGET we are about to send.
7828 * However, that is not so catastrophic, and there seems
7829 * to be no way to prevent it completely.
7830 */
7831 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7832 &lgp->res.seq_res, task))
7833 return;
7834 ret = pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7835 NFS_I(lgp->args.inode)->layout,
7836 &lgp->args.range,
7837 lgp->args.ctx->state);
7838 if (ret < 0)
7839 rpc_exit(task, ret);
7840 }
7841
7842 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7843 {
7844 struct nfs4_layoutget *lgp = calldata;
7845 struct inode *inode = lgp->args.inode;
7846 struct nfs_server *server = NFS_SERVER(inode);
7847 struct pnfs_layout_hdr *lo;
7848 struct nfs4_state *state = NULL;
7849 unsigned long timeo, now, giveup;
7850
7851 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7852
7853 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7854 goto out;
7855
7856 switch (task->tk_status) {
7857 case 0:
7858 goto out;
7859
7860 /*
7861 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
7862 * on the file. set tk_status to -ENODATA to tell upper layer to
7863 * retry go inband.
7864 */
7865 case -NFS4ERR_LAYOUTUNAVAILABLE:
7866 task->tk_status = -ENODATA;
7867 goto out;
7868 /*
7869 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
7870 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
7871 */
7872 case -NFS4ERR_BADLAYOUT:
7873 goto out_overflow;
7874 /*
7875 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7876 * (or clients) writing to the same RAID stripe except when
7877 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7878 */
7879 case -NFS4ERR_LAYOUTTRYLATER:
7880 if (lgp->args.minlength == 0)
7881 goto out_overflow;
7882 /*
7883 * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall
7884 * existing layout before getting a new one).
7885 */
7886 case -NFS4ERR_RECALLCONFLICT:
7887 timeo = rpc_get_timeout(task->tk_client);
7888 giveup = lgp->args.timestamp + timeo;
7889 now = jiffies;
7890 if (time_after(giveup, now)) {
7891 unsigned long delay;
7892
7893 /* Delay for:
7894 * - Not less then NFS4_POLL_RETRY_MIN.
7895 * - One last time a jiffie before we give up
7896 * - exponential backoff (time_now minus start_attempt)
7897 */
7898 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7899 min((giveup - now - 1),
7900 now - lgp->args.timestamp));
7901
7902 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7903 __func__, delay);
7904 rpc_delay(task, delay);
7905 /* Do not call nfs4_async_handle_error() */
7906 goto out_restart;
7907 }
7908 break;
7909 case -NFS4ERR_EXPIRED:
7910 case -NFS4ERR_BAD_STATEID:
7911 spin_lock(&inode->i_lock);
7912 if (nfs4_stateid_match(&lgp->args.stateid,
7913 &lgp->args.ctx->state->stateid)) {
7914 spin_unlock(&inode->i_lock);
7915 /* If the open stateid was bad, then recover it. */
7916 state = lgp->args.ctx->state;
7917 break;
7918 }
7919 lo = NFS_I(inode)->layout;
7920 if (lo && nfs4_stateid_match(&lgp->args.stateid,
7921 &lo->plh_stateid)) {
7922 LIST_HEAD(head);
7923
7924 /*
7925 * Mark the bad layout state as invalid, then retry
7926 * with the current stateid.
7927 */
7928 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7929 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7930 spin_unlock(&inode->i_lock);
7931 pnfs_free_lseg_list(&head);
7932 } else
7933 spin_unlock(&inode->i_lock);
7934 goto out_restart;
7935 }
7936 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7937 goto out_restart;
7938 out:
7939 dprintk("<-- %s\n", __func__);
7940 return;
7941 out_restart:
7942 task->tk_status = 0;
7943 rpc_restart_call_prepare(task);
7944 return;
7945 out_overflow:
7946 task->tk_status = -EOVERFLOW;
7947 goto out;
7948 }
7949
7950 static size_t max_response_pages(struct nfs_server *server)
7951 {
7952 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7953 return nfs_page_array_len(0, max_resp_sz);
7954 }
7955
7956 static void nfs4_free_pages(struct page **pages, size_t size)
7957 {
7958 int i;
7959
7960 if (!pages)
7961 return;
7962
7963 for (i = 0; i < size; i++) {
7964 if (!pages[i])
7965 break;
7966 __free_page(pages[i]);
7967 }
7968 kfree(pages);
7969 }
7970
7971 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7972 {
7973 struct page **pages;
7974 int i;
7975
7976 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7977 if (!pages) {
7978 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7979 return NULL;
7980 }
7981
7982 for (i = 0; i < size; i++) {
7983 pages[i] = alloc_page(gfp_flags);
7984 if (!pages[i]) {
7985 dprintk("%s: failed to allocate page\n", __func__);
7986 nfs4_free_pages(pages, size);
7987 return NULL;
7988 }
7989 }
7990
7991 return pages;
7992 }
7993
7994 static void nfs4_layoutget_release(void *calldata)
7995 {
7996 struct nfs4_layoutget *lgp = calldata;
7997 struct inode *inode = lgp->args.inode;
7998 struct nfs_server *server = NFS_SERVER(inode);
7999 size_t max_pages = max_response_pages(server);
8000
8001 dprintk("--> %s\n", __func__);
8002 nfs4_free_pages(lgp->args.layout.pages, max_pages);
8003 pnfs_put_layout_hdr(NFS_I(inode)->layout);
8004 put_nfs_open_context(lgp->args.ctx);
8005 kfree(calldata);
8006 dprintk("<-- %s\n", __func__);
8007 }
8008
8009 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
8010 .rpc_call_prepare = nfs4_layoutget_prepare,
8011 .rpc_call_done = nfs4_layoutget_done,
8012 .rpc_release = nfs4_layoutget_release,
8013 };
8014
8015 struct pnfs_layout_segment *
8016 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
8017 {
8018 struct inode *inode = lgp->args.inode;
8019 struct nfs_server *server = NFS_SERVER(inode);
8020 size_t max_pages = max_response_pages(server);
8021 struct rpc_task *task;
8022 struct rpc_message msg = {
8023 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
8024 .rpc_argp = &lgp->args,
8025 .rpc_resp = &lgp->res,
8026 .rpc_cred = lgp->cred,
8027 };
8028 struct rpc_task_setup task_setup_data = {
8029 .rpc_client = server->client,
8030 .rpc_message = &msg,
8031 .callback_ops = &nfs4_layoutget_call_ops,
8032 .callback_data = lgp,
8033 .flags = RPC_TASK_ASYNC,
8034 };
8035 struct pnfs_layout_segment *lseg = NULL;
8036 int status = 0;
8037
8038 dprintk("--> %s\n", __func__);
8039
8040 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
8041 pnfs_get_layout_hdr(NFS_I(inode)->layout);
8042
8043 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
8044 if (!lgp->args.layout.pages) {
8045 nfs4_layoutget_release(lgp);
8046 return ERR_PTR(-ENOMEM);
8047 }
8048 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
8049 lgp->args.timestamp = jiffies;
8050
8051 lgp->res.layoutp = &lgp->args.layout;
8052 lgp->res.seq_res.sr_slot = NULL;
8053 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
8054
8055 task = rpc_run_task(&task_setup_data);
8056 if (IS_ERR(task))
8057 return ERR_CAST(task);
8058 status = nfs4_wait_for_completion_rpc_task(task);
8059 if (status == 0)
8060 status = task->tk_status;
8061 trace_nfs4_layoutget(lgp->args.ctx,
8062 &lgp->args.range,
8063 &lgp->res.range,
8064 &lgp->res.stateid,
8065 status);
8066 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8067 if (status == 0 && lgp->res.layoutp->len)
8068 lseg = pnfs_layout_process(lgp);
8069 rpc_put_task(task);
8070 dprintk("<-- %s status=%d\n", __func__, status);
8071 if (status)
8072 return ERR_PTR(status);
8073 return lseg;
8074 }
8075
8076 static void
8077 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8078 {
8079 struct nfs4_layoutreturn *lrp = calldata;
8080
8081 dprintk("--> %s\n", __func__);
8082 nfs41_setup_sequence(lrp->clp->cl_session,
8083 &lrp->args.seq_args,
8084 &lrp->res.seq_res,
8085 task);
8086 }
8087
8088 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8089 {
8090 struct nfs4_layoutreturn *lrp = calldata;
8091 struct nfs_server *server;
8092
8093 dprintk("--> %s\n", __func__);
8094
8095 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
8096 return;
8097
8098 server = NFS_SERVER(lrp->args.inode);
8099 switch (task->tk_status) {
8100 default:
8101 task->tk_status = 0;
8102 case 0:
8103 break;
8104 case -NFS4ERR_DELAY:
8105 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8106 break;
8107 rpc_restart_call_prepare(task);
8108 return;
8109 }
8110 dprintk("<-- %s\n", __func__);
8111 }
8112
8113 static void nfs4_layoutreturn_release(void *calldata)
8114 {
8115 struct nfs4_layoutreturn *lrp = calldata;
8116 struct pnfs_layout_hdr *lo = lrp->args.layout;
8117 LIST_HEAD(freeme);
8118
8119 dprintk("--> %s\n", __func__);
8120 spin_lock(&lo->plh_inode->i_lock);
8121 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
8122 pnfs_mark_layout_returned_if_empty(lo);
8123 if (lrp->res.lrs_present)
8124 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8125 pnfs_clear_layoutreturn_waitbit(lo);
8126 spin_unlock(&lo->plh_inode->i_lock);
8127 pnfs_free_lseg_list(&freeme);
8128 pnfs_put_layout_hdr(lrp->args.layout);
8129 nfs_iput_and_deactive(lrp->inode);
8130 kfree(calldata);
8131 dprintk("<-- %s\n", __func__);
8132 }
8133
8134 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8135 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8136 .rpc_call_done = nfs4_layoutreturn_done,
8137 .rpc_release = nfs4_layoutreturn_release,
8138 };
8139
8140 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8141 {
8142 struct rpc_task *task;
8143 struct rpc_message msg = {
8144 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8145 .rpc_argp = &lrp->args,
8146 .rpc_resp = &lrp->res,
8147 .rpc_cred = lrp->cred,
8148 };
8149 struct rpc_task_setup task_setup_data = {
8150 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8151 .rpc_message = &msg,
8152 .callback_ops = &nfs4_layoutreturn_call_ops,
8153 .callback_data = lrp,
8154 };
8155 int status = 0;
8156
8157 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
8158 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
8159 &task_setup_data.rpc_client, &msg);
8160
8161 dprintk("--> %s\n", __func__);
8162 if (!sync) {
8163 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8164 if (!lrp->inode) {
8165 nfs4_layoutreturn_release(lrp);
8166 return -EAGAIN;
8167 }
8168 task_setup_data.flags |= RPC_TASK_ASYNC;
8169 }
8170 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8171 task = rpc_run_task(&task_setup_data);
8172 if (IS_ERR(task))
8173 return PTR_ERR(task);
8174 if (sync)
8175 status = task->tk_status;
8176 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8177 dprintk("<-- %s status=%d\n", __func__, status);
8178 rpc_put_task(task);
8179 return status;
8180 }
8181
8182 static int
8183 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8184 struct pnfs_device *pdev,
8185 struct rpc_cred *cred)
8186 {
8187 struct nfs4_getdeviceinfo_args args = {
8188 .pdev = pdev,
8189 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8190 NOTIFY_DEVICEID4_DELETE,
8191 };
8192 struct nfs4_getdeviceinfo_res res = {
8193 .pdev = pdev,
8194 };
8195 struct rpc_message msg = {
8196 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8197 .rpc_argp = &args,
8198 .rpc_resp = &res,
8199 .rpc_cred = cred,
8200 };
8201 int status;
8202
8203 dprintk("--> %s\n", __func__);
8204 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8205 if (res.notification & ~args.notify_types)
8206 dprintk("%s: unsupported notification\n", __func__);
8207 if (res.notification != args.notify_types)
8208 pdev->nocache = 1;
8209
8210 dprintk("<-- %s status=%d\n", __func__, status);
8211
8212 return status;
8213 }
8214
8215 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8216 struct pnfs_device *pdev,
8217 struct rpc_cred *cred)
8218 {
8219 struct nfs4_exception exception = { };
8220 int err;
8221
8222 do {
8223 err = nfs4_handle_exception(server,
8224 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8225 &exception);
8226 } while (exception.retry);
8227 return err;
8228 }
8229 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8230
8231 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8232 {
8233 struct nfs4_layoutcommit_data *data = calldata;
8234 struct nfs_server *server = NFS_SERVER(data->args.inode);
8235 struct nfs4_session *session = nfs4_get_session(server);
8236
8237 nfs41_setup_sequence(session,
8238 &data->args.seq_args,
8239 &data->res.seq_res,
8240 task);
8241 }
8242
8243 static void
8244 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8245 {
8246 struct nfs4_layoutcommit_data *data = calldata;
8247 struct nfs_server *server = NFS_SERVER(data->args.inode);
8248
8249 if (!nfs41_sequence_done(task, &data->res.seq_res))
8250 return;
8251
8252 switch (task->tk_status) { /* Just ignore these failures */
8253 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8254 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8255 case -NFS4ERR_BADLAYOUT: /* no layout */
8256 case -NFS4ERR_GRACE: /* loca_recalim always false */
8257 task->tk_status = 0;
8258 case 0:
8259 break;
8260 default:
8261 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8262 rpc_restart_call_prepare(task);
8263 return;
8264 }
8265 }
8266 }
8267
8268 static void nfs4_layoutcommit_release(void *calldata)
8269 {
8270 struct nfs4_layoutcommit_data *data = calldata;
8271
8272 pnfs_cleanup_layoutcommit(data);
8273 nfs_post_op_update_inode_force_wcc(data->args.inode,
8274 data->res.fattr);
8275 put_rpccred(data->cred);
8276 nfs_iput_and_deactive(data->inode);
8277 kfree(data);
8278 }
8279
8280 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8281 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8282 .rpc_call_done = nfs4_layoutcommit_done,
8283 .rpc_release = nfs4_layoutcommit_release,
8284 };
8285
8286 int
8287 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8288 {
8289 struct rpc_message msg = {
8290 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8291 .rpc_argp = &data->args,
8292 .rpc_resp = &data->res,
8293 .rpc_cred = data->cred,
8294 };
8295 struct rpc_task_setup task_setup_data = {
8296 .task = &data->task,
8297 .rpc_client = NFS_CLIENT(data->args.inode),
8298 .rpc_message = &msg,
8299 .callback_ops = &nfs4_layoutcommit_ops,
8300 .callback_data = data,
8301 };
8302 struct rpc_task *task;
8303 int status = 0;
8304
8305 dprintk("NFS: initiating layoutcommit call. sync %d "
8306 "lbw: %llu inode %lu\n", sync,
8307 data->args.lastbytewritten,
8308 data->args.inode->i_ino);
8309
8310 if (!sync) {
8311 data->inode = nfs_igrab_and_active(data->args.inode);
8312 if (data->inode == NULL) {
8313 nfs4_layoutcommit_release(data);
8314 return -EAGAIN;
8315 }
8316 task_setup_data.flags = RPC_TASK_ASYNC;
8317 }
8318 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8319 task = rpc_run_task(&task_setup_data);
8320 if (IS_ERR(task))
8321 return PTR_ERR(task);
8322 if (sync)
8323 status = task->tk_status;
8324 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
8325 dprintk("%s: status %d\n", __func__, status);
8326 rpc_put_task(task);
8327 return status;
8328 }
8329
8330 /**
8331 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if
8332 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8333 */
8334 static int
8335 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8336 struct nfs_fsinfo *info,
8337 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8338 {
8339 struct nfs41_secinfo_no_name_args args = {
8340 .style = SECINFO_STYLE_CURRENT_FH,
8341 };
8342 struct nfs4_secinfo_res res = {
8343 .flavors = flavors,
8344 };
8345 struct rpc_message msg = {
8346 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8347 .rpc_argp = &args,
8348 .rpc_resp = &res,
8349 };
8350 struct rpc_clnt *clnt = server->client;
8351 struct rpc_cred *cred = NULL;
8352 int status;
8353
8354 if (use_integrity) {
8355 clnt = server->nfs_client->cl_rpcclient;
8356 cred = nfs4_get_clid_cred(server->nfs_client);
8357 msg.rpc_cred = cred;
8358 }
8359
8360 dprintk("--> %s\n", __func__);
8361 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8362 &res.seq_res, 0);
8363 dprintk("<-- %s status=%d\n", __func__, status);
8364
8365 if (cred)
8366 put_rpccred(cred);
8367
8368 return status;
8369 }
8370
8371 static int
8372 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8373 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8374 {
8375 struct nfs4_exception exception = { };
8376 int err;
8377 do {
8378 /* first try using integrity protection */
8379 err = -NFS4ERR_WRONGSEC;
8380
8381 /* try to use integrity protection with machine cred */
8382 if (_nfs4_is_integrity_protected(server->nfs_client))
8383 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8384 flavors, true);
8385
8386 /*
8387 * if unable to use integrity protection, or SECINFO with
8388 * integrity protection returns NFS4ERR_WRONGSEC (which is
8389 * disallowed by spec, but exists in deployed servers) use
8390 * the current filesystem's rpc_client and the user cred.
8391 */
8392 if (err == -NFS4ERR_WRONGSEC)
8393 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8394 flavors, false);
8395
8396 switch (err) {
8397 case 0:
8398 case -NFS4ERR_WRONGSEC:
8399 case -ENOTSUPP:
8400 goto out;
8401 default:
8402 err = nfs4_handle_exception(server, err, &exception);
8403 }
8404 } while (exception.retry);
8405 out:
8406 return err;
8407 }
8408
8409 static int
8410 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8411 struct nfs_fsinfo *info)
8412 {
8413 int err;
8414 struct page *page;
8415 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8416 struct nfs4_secinfo_flavors *flavors;
8417 struct nfs4_secinfo4 *secinfo;
8418 int i;
8419
8420 page = alloc_page(GFP_KERNEL);
8421 if (!page) {
8422 err = -ENOMEM;
8423 goto out;
8424 }
8425
8426 flavors = page_address(page);
8427 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8428
8429 /*
8430 * Fall back on "guess and check" method if
8431 * the server doesn't support SECINFO_NO_NAME
8432 */
8433 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8434 err = nfs4_find_root_sec(server, fhandle, info);
8435 goto out_freepage;
8436 }
8437 if (err)
8438 goto out_freepage;
8439
8440 for (i = 0; i < flavors->num_flavors; i++) {
8441 secinfo = &flavors->flavors[i];
8442
8443 switch (secinfo->flavor) {
8444 case RPC_AUTH_NULL:
8445 case RPC_AUTH_UNIX:
8446 case RPC_AUTH_GSS:
8447 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8448 &secinfo->flavor_info);
8449 break;
8450 default:
8451 flavor = RPC_AUTH_MAXFLAVOR;
8452 break;
8453 }
8454
8455 if (!nfs_auth_info_match(&server->auth_info, flavor))
8456 flavor = RPC_AUTH_MAXFLAVOR;
8457
8458 if (flavor != RPC_AUTH_MAXFLAVOR) {
8459 err = nfs4_lookup_root_sec(server, fhandle,
8460 info, flavor);
8461 if (!err)
8462 break;
8463 }
8464 }
8465
8466 if (flavor == RPC_AUTH_MAXFLAVOR)
8467 err = -EPERM;
8468
8469 out_freepage:
8470 put_page(page);
8471 if (err == -EACCES)
8472 return -EPERM;
8473 out:
8474 return err;
8475 }
8476
8477 static int _nfs41_test_stateid(struct nfs_server *server,
8478 nfs4_stateid *stateid,
8479 struct rpc_cred *cred)
8480 {
8481 int status;
8482 struct nfs41_test_stateid_args args = {
8483 .stateid = stateid,
8484 };
8485 struct nfs41_test_stateid_res res;
8486 struct rpc_message msg = {
8487 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8488 .rpc_argp = &args,
8489 .rpc_resp = &res,
8490 .rpc_cred = cred,
8491 };
8492 struct rpc_clnt *rpc_client = server->client;
8493
8494 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8495 &rpc_client, &msg);
8496
8497 dprintk("NFS call test_stateid %p\n", stateid);
8498 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8499 nfs4_set_sequence_privileged(&args.seq_args);
8500 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8501 &args.seq_args, &res.seq_res);
8502 if (status != NFS_OK) {
8503 dprintk("NFS reply test_stateid: failed, %d\n", status);
8504 return status;
8505 }
8506 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8507 return -res.status;
8508 }
8509
8510 /**
8511 * nfs41_test_stateid - perform a TEST_STATEID operation
8512 *
8513 * @server: server / transport on which to perform the operation
8514 * @stateid: state ID to test
8515 * @cred: credential
8516 *
8517 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8518 * Otherwise a negative NFS4ERR value is returned if the operation
8519 * failed or the state ID is not currently valid.
8520 */
8521 static int nfs41_test_stateid(struct nfs_server *server,
8522 nfs4_stateid *stateid,
8523 struct rpc_cred *cred)
8524 {
8525 struct nfs4_exception exception = { };
8526 int err;
8527 do {
8528 err = _nfs41_test_stateid(server, stateid, cred);
8529 if (err != -NFS4ERR_DELAY)
8530 break;
8531 nfs4_handle_exception(server, err, &exception);
8532 } while (exception.retry);
8533 return err;
8534 }
8535
8536 struct nfs_free_stateid_data {
8537 struct nfs_server *server;
8538 struct nfs41_free_stateid_args args;
8539 struct nfs41_free_stateid_res res;
8540 };
8541
8542 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8543 {
8544 struct nfs_free_stateid_data *data = calldata;
8545 nfs41_setup_sequence(nfs4_get_session(data->server),
8546 &data->args.seq_args,
8547 &data->res.seq_res,
8548 task);
8549 }
8550
8551 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8552 {
8553 struct nfs_free_stateid_data *data = calldata;
8554
8555 nfs41_sequence_done(task, &data->res.seq_res);
8556
8557 switch (task->tk_status) {
8558 case -NFS4ERR_DELAY:
8559 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8560 rpc_restart_call_prepare(task);
8561 }
8562 }
8563
8564 static void nfs41_free_stateid_release(void *calldata)
8565 {
8566 kfree(calldata);
8567 }
8568
8569 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8570 .rpc_call_prepare = nfs41_free_stateid_prepare,
8571 .rpc_call_done = nfs41_free_stateid_done,
8572 .rpc_release = nfs41_free_stateid_release,
8573 };
8574
8575 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8576 nfs4_stateid *stateid,
8577 struct rpc_cred *cred,
8578 bool privileged)
8579 {
8580 struct rpc_message msg = {
8581 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8582 .rpc_cred = cred,
8583 };
8584 struct rpc_task_setup task_setup = {
8585 .rpc_client = server->client,
8586 .rpc_message = &msg,
8587 .callback_ops = &nfs41_free_stateid_ops,
8588 .flags = RPC_TASK_ASYNC,
8589 };
8590 struct nfs_free_stateid_data *data;
8591
8592 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8593 &task_setup.rpc_client, &msg);
8594
8595 dprintk("NFS call free_stateid %p\n", stateid);
8596 data = kmalloc(sizeof(*data), GFP_NOFS);
8597 if (!data)
8598 return ERR_PTR(-ENOMEM);
8599 data->server = server;
8600 nfs4_stateid_copy(&data->args.stateid, stateid);
8601
8602 task_setup.callback_data = data;
8603
8604 msg.rpc_argp = &data->args;
8605 msg.rpc_resp = &data->res;
8606 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8607 if (privileged)
8608 nfs4_set_sequence_privileged(&data->args.seq_args);
8609
8610 return rpc_run_task(&task_setup);
8611 }
8612
8613 /**
8614 * nfs41_free_stateid - perform a FREE_STATEID operation
8615 *
8616 * @server: server / transport on which to perform the operation
8617 * @stateid: state ID to release
8618 * @cred: credential
8619 *
8620 * Returns NFS_OK if the server freed "stateid". Otherwise a
8621 * negative NFS4ERR value is returned.
8622 */
8623 static int nfs41_free_stateid(struct nfs_server *server,
8624 nfs4_stateid *stateid,
8625 struct rpc_cred *cred)
8626 {
8627 struct rpc_task *task;
8628 int ret;
8629
8630 task = _nfs41_free_stateid(server, stateid, cred, true);
8631 if (IS_ERR(task))
8632 return PTR_ERR(task);
8633 ret = rpc_wait_for_completion_task(task);
8634 if (!ret)
8635 ret = task->tk_status;
8636 rpc_put_task(task);
8637 return ret;
8638 }
8639
8640 static void
8641 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8642 {
8643 struct rpc_task *task;
8644 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8645
8646 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8647 nfs4_free_lock_state(server, lsp);
8648 if (IS_ERR(task))
8649 return;
8650 rpc_put_task(task);
8651 }
8652
8653 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8654 const nfs4_stateid *s2)
8655 {
8656 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8657 return false;
8658
8659 if (s1->seqid == s2->seqid)
8660 return true;
8661 if (s1->seqid == 0 || s2->seqid == 0)
8662 return true;
8663
8664 return false;
8665 }
8666
8667 #endif /* CONFIG_NFS_V4_1 */
8668
8669 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8670 const nfs4_stateid *s2)
8671 {
8672 return nfs4_stateid_match(s1, s2);
8673 }
8674
8675
8676 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8677 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8678 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8679 .recover_open = nfs4_open_reclaim,
8680 .recover_lock = nfs4_lock_reclaim,
8681 .establish_clid = nfs4_init_clientid,
8682 .detect_trunking = nfs40_discover_server_trunking,
8683 };
8684
8685 #if defined(CONFIG_NFS_V4_1)
8686 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8687 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8688 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8689 .recover_open = nfs4_open_reclaim,
8690 .recover_lock = nfs4_lock_reclaim,
8691 .establish_clid = nfs41_init_clientid,
8692 .reclaim_complete = nfs41_proc_reclaim_complete,
8693 .detect_trunking = nfs41_discover_server_trunking,
8694 };
8695 #endif /* CONFIG_NFS_V4_1 */
8696
8697 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8698 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8699 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8700 .recover_open = nfs40_open_expired,
8701 .recover_lock = nfs4_lock_expired,
8702 .establish_clid = nfs4_init_clientid,
8703 };
8704
8705 #if defined(CONFIG_NFS_V4_1)
8706 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8707 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8708 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8709 .recover_open = nfs41_open_expired,
8710 .recover_lock = nfs41_lock_expired,
8711 .establish_clid = nfs41_init_clientid,
8712 };
8713 #endif /* CONFIG_NFS_V4_1 */
8714
8715 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8716 .sched_state_renewal = nfs4_proc_async_renew,
8717 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8718 .renew_lease = nfs4_proc_renew,
8719 };
8720
8721 #if defined(CONFIG_NFS_V4_1)
8722 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8723 .sched_state_renewal = nfs41_proc_async_sequence,
8724 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8725 .renew_lease = nfs4_proc_sequence,
8726 };
8727 #endif
8728
8729 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8730 .get_locations = _nfs40_proc_get_locations,
8731 .fsid_present = _nfs40_proc_fsid_present,
8732 };
8733
8734 #if defined(CONFIG_NFS_V4_1)
8735 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8736 .get_locations = _nfs41_proc_get_locations,
8737 .fsid_present = _nfs41_proc_fsid_present,
8738 };
8739 #endif /* CONFIG_NFS_V4_1 */
8740
8741 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8742 .minor_version = 0,
8743 .init_caps = NFS_CAP_READDIRPLUS
8744 | NFS_CAP_ATOMIC_OPEN
8745 | NFS_CAP_POSIX_LOCK,
8746 .init_client = nfs40_init_client,
8747 .shutdown_client = nfs40_shutdown_client,
8748 .match_stateid = nfs4_match_stateid,
8749 .find_root_sec = nfs4_find_root_sec,
8750 .free_lock_state = nfs4_release_lockowner,
8751 .alloc_seqid = nfs_alloc_seqid,
8752 .call_sync_ops = &nfs40_call_sync_ops,
8753 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8754 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8755 .state_renewal_ops = &nfs40_state_renewal_ops,
8756 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8757 };
8758
8759 #if defined(CONFIG_NFS_V4_1)
8760 static struct nfs_seqid *
8761 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8762 {
8763 return NULL;
8764 }
8765
8766 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8767 .minor_version = 1,
8768 .init_caps = NFS_CAP_READDIRPLUS
8769 | NFS_CAP_ATOMIC_OPEN
8770 | NFS_CAP_POSIX_LOCK
8771 | NFS_CAP_STATEID_NFSV41
8772 | NFS_CAP_ATOMIC_OPEN_V1,
8773 .init_client = nfs41_init_client,
8774 .shutdown_client = nfs41_shutdown_client,
8775 .match_stateid = nfs41_match_stateid,
8776 .find_root_sec = nfs41_find_root_sec,
8777 .free_lock_state = nfs41_free_lock_state,
8778 .alloc_seqid = nfs_alloc_no_seqid,
8779 .call_sync_ops = &nfs41_call_sync_ops,
8780 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8781 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8782 .state_renewal_ops = &nfs41_state_renewal_ops,
8783 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8784 };
8785 #endif
8786
8787 #if defined(CONFIG_NFS_V4_2)
8788 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8789 .minor_version = 2,
8790 .init_caps = NFS_CAP_READDIRPLUS
8791 | NFS_CAP_ATOMIC_OPEN
8792 | NFS_CAP_POSIX_LOCK
8793 | NFS_CAP_STATEID_NFSV41
8794 | NFS_CAP_ATOMIC_OPEN_V1
8795 | NFS_CAP_ALLOCATE
8796 | NFS_CAP_DEALLOCATE
8797 | NFS_CAP_SEEK
8798 | NFS_CAP_LAYOUTSTATS
8799 | NFS_CAP_CLONE,
8800 .init_client = nfs41_init_client,
8801 .shutdown_client = nfs41_shutdown_client,
8802 .match_stateid = nfs41_match_stateid,
8803 .find_root_sec = nfs41_find_root_sec,
8804 .free_lock_state = nfs41_free_lock_state,
8805 .call_sync_ops = &nfs41_call_sync_ops,
8806 .alloc_seqid = nfs_alloc_no_seqid,
8807 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8808 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8809 .state_renewal_ops = &nfs41_state_renewal_ops,
8810 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8811 };
8812 #endif
8813
8814 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8815 [0] = &nfs_v4_0_minor_ops,
8816 #if defined(CONFIG_NFS_V4_1)
8817 [1] = &nfs_v4_1_minor_ops,
8818 #endif
8819 #if defined(CONFIG_NFS_V4_2)
8820 [2] = &nfs_v4_2_minor_ops,
8821 #endif
8822 };
8823
8824 ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
8825 {
8826 ssize_t error, error2;
8827
8828 error = generic_listxattr(dentry, list, size);
8829 if (error < 0)
8830 return error;
8831 if (list) {
8832 list += error;
8833 size -= error;
8834 }
8835
8836 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
8837 if (error2 < 0)
8838 return error2;
8839 return error + error2;
8840 }
8841
8842 static const struct inode_operations nfs4_dir_inode_operations = {
8843 .create = nfs_create,
8844 .lookup = nfs_lookup,
8845 .atomic_open = nfs_atomic_open,
8846 .link = nfs_link,
8847 .unlink = nfs_unlink,
8848 .symlink = nfs_symlink,
8849 .mkdir = nfs_mkdir,
8850 .rmdir = nfs_rmdir,
8851 .mknod = nfs_mknod,
8852 .rename = nfs_rename,
8853 .permission = nfs_permission,
8854 .getattr = nfs_getattr,
8855 .setattr = nfs_setattr,
8856 .getxattr = generic_getxattr,
8857 .setxattr = generic_setxattr,
8858 .listxattr = nfs4_listxattr,
8859 .removexattr = generic_removexattr,
8860 };
8861
8862 static const struct inode_operations nfs4_file_inode_operations = {
8863 .permission = nfs_permission,
8864 .getattr = nfs_getattr,
8865 .setattr = nfs_setattr,
8866 .getxattr = generic_getxattr,
8867 .setxattr = generic_setxattr,
8868 .listxattr = nfs4_listxattr,
8869 .removexattr = generic_removexattr,
8870 };
8871
8872 const struct nfs_rpc_ops nfs_v4_clientops = {
8873 .version = 4, /* protocol version */
8874 .dentry_ops = &nfs4_dentry_operations,
8875 .dir_inode_ops = &nfs4_dir_inode_operations,
8876 .file_inode_ops = &nfs4_file_inode_operations,
8877 .file_ops = &nfs4_file_operations,
8878 .getroot = nfs4_proc_get_root,
8879 .submount = nfs4_submount,
8880 .try_mount = nfs4_try_mount,
8881 .getattr = nfs4_proc_getattr,
8882 .setattr = nfs4_proc_setattr,
8883 .lookup = nfs4_proc_lookup,
8884 .access = nfs4_proc_access,
8885 .readlink = nfs4_proc_readlink,
8886 .create = nfs4_proc_create,
8887 .remove = nfs4_proc_remove,
8888 .unlink_setup = nfs4_proc_unlink_setup,
8889 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8890 .unlink_done = nfs4_proc_unlink_done,
8891 .rename_setup = nfs4_proc_rename_setup,
8892 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8893 .rename_done = nfs4_proc_rename_done,
8894 .link = nfs4_proc_link,
8895 .symlink = nfs4_proc_symlink,
8896 .mkdir = nfs4_proc_mkdir,
8897 .rmdir = nfs4_proc_remove,
8898 .readdir = nfs4_proc_readdir,
8899 .mknod = nfs4_proc_mknod,
8900 .statfs = nfs4_proc_statfs,
8901 .fsinfo = nfs4_proc_fsinfo,
8902 .pathconf = nfs4_proc_pathconf,
8903 .set_capabilities = nfs4_server_capabilities,
8904 .decode_dirent = nfs4_decode_dirent,
8905 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8906 .read_setup = nfs4_proc_read_setup,
8907 .read_done = nfs4_read_done,
8908 .write_setup = nfs4_proc_write_setup,
8909 .write_done = nfs4_write_done,
8910 .commit_setup = nfs4_proc_commit_setup,
8911 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8912 .commit_done = nfs4_commit_done,
8913 .lock = nfs4_proc_lock,
8914 .clear_acl_cache = nfs4_zap_acl_attr,
8915 .close_context = nfs4_close_context,
8916 .open_context = nfs4_atomic_open,
8917 .have_delegation = nfs4_have_delegation,
8918 .return_delegation = nfs4_inode_return_delegation,
8919 .alloc_client = nfs4_alloc_client,
8920 .init_client = nfs4_init_client,
8921 .free_client = nfs4_free_client,
8922 .create_server = nfs4_create_server,
8923 .clone_server = nfs_clone_server,
8924 };
8925
8926 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8927 .name = XATTR_NAME_NFSV4_ACL,
8928 .list = nfs4_xattr_list_nfs4_acl,
8929 .get = nfs4_xattr_get_nfs4_acl,
8930 .set = nfs4_xattr_set_nfs4_acl,
8931 };
8932
8933 const struct xattr_handler *nfs4_xattr_handlers[] = {
8934 &nfs4_xattr_nfs4_acl_handler,
8935 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8936 &nfs4_xattr_nfs4_label_handler,
8937 #endif
8938 NULL
8939 };
8940
8941 /*
8942 * Local variables:
8943 * c-basic-offset: 8
8944 * End:
8945 */