1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 struct nfs4_opendata;
78 static int _nfs4_proc_open(struct nfs4_opendata *data);
79 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
82 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
84 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
85 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
86 struct nfs_fattr *fattr, struct iattr *sattr,
87 struct nfs4_state *state, struct nfs4_label *ilabel,
88 struct nfs4_label *olabel);
89 #ifdef CONFIG_NFS_V4_1
90 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
91 struct rpc_cred *);
92 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
93 struct rpc_cred *);
94 #endif
95
96 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
97 static inline struct nfs4_label *
98 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
99 struct iattr *sattr, struct nfs4_label *label)
100 {
101 int err;
102
103 if (label == NULL)
104 return NULL;
105
106 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
107 return NULL;
108
109 err = security_dentry_init_security(dentry, sattr->ia_mode,
110 &dentry->d_name, (void **)&label->label, &label->len);
111 if (err == 0)
112 return label;
113
114 return NULL;
115 }
116 static inline void
117 nfs4_label_release_security(struct nfs4_label *label)
118 {
119 if (label)
120 security_release_secctx(label->label, label->len);
121 }
122 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
123 {
124 if (label)
125 return server->attr_bitmask;
126
127 return server->attr_bitmask_nl;
128 }
129 #else
130 static inline struct nfs4_label *
131 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
132 struct iattr *sattr, struct nfs4_label *l)
133 { return NULL; }
134 static inline void
135 nfs4_label_release_security(struct nfs4_label *label)
136 { return; }
137 static inline u32 *
138 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
139 { return server->attr_bitmask; }
140 #endif
141
142 /* Prevent leaks of NFSv4 errors into userland */
143 static int nfs4_map_errors(int err)
144 {
145 if (err >= -1000)
146 return err;
147 switch (err) {
148 case -NFS4ERR_RESOURCE:
149 case -NFS4ERR_LAYOUTTRYLATER:
150 case -NFS4ERR_RECALLCONFLICT:
151 return -EREMOTEIO;
152 case -NFS4ERR_WRONGSEC:
153 case -NFS4ERR_WRONG_CRED:
154 return -EPERM;
155 case -NFS4ERR_BADOWNER:
156 case -NFS4ERR_BADNAME:
157 return -EINVAL;
158 case -NFS4ERR_SHARE_DENIED:
159 return -EACCES;
160 case -NFS4ERR_MINOR_VERS_MISMATCH:
161 return -EPROTONOSUPPORT;
162 case -NFS4ERR_FILE_OPEN:
163 return -EBUSY;
164 default:
165 dprintk("%s could not handle NFSv4 error %d\n",
166 __func__, -err);
167 break;
168 }
169 return -EIO;
170 }
171
172 /*
173 * This is our standard bitmap for GETATTR requests.
174 */
175 const u32 nfs4_fattr_bitmap[3] = {
176 FATTR4_WORD0_TYPE
177 | FATTR4_WORD0_CHANGE
178 | FATTR4_WORD0_SIZE
179 | FATTR4_WORD0_FSID
180 | FATTR4_WORD0_FILEID,
181 FATTR4_WORD1_MODE
182 | FATTR4_WORD1_NUMLINKS
183 | FATTR4_WORD1_OWNER
184 | FATTR4_WORD1_OWNER_GROUP
185 | FATTR4_WORD1_RAWDEV
186 | FATTR4_WORD1_SPACE_USED
187 | FATTR4_WORD1_TIME_ACCESS
188 | FATTR4_WORD1_TIME_METADATA
189 | FATTR4_WORD1_TIME_MODIFY
190 | FATTR4_WORD1_MOUNTED_ON_FILEID,
191 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
192 FATTR4_WORD2_SECURITY_LABEL
193 #endif
194 };
195
196 static const u32 nfs4_pnfs_open_bitmap[3] = {
197 FATTR4_WORD0_TYPE
198 | FATTR4_WORD0_CHANGE
199 | FATTR4_WORD0_SIZE
200 | FATTR4_WORD0_FSID
201 | FATTR4_WORD0_FILEID,
202 FATTR4_WORD1_MODE
203 | FATTR4_WORD1_NUMLINKS
204 | FATTR4_WORD1_OWNER
205 | FATTR4_WORD1_OWNER_GROUP
206 | FATTR4_WORD1_RAWDEV
207 | FATTR4_WORD1_SPACE_USED
208 | FATTR4_WORD1_TIME_ACCESS
209 | FATTR4_WORD1_TIME_METADATA
210 | FATTR4_WORD1_TIME_MODIFY,
211 FATTR4_WORD2_MDSTHRESHOLD
212 };
213
214 static const u32 nfs4_open_noattr_bitmap[3] = {
215 FATTR4_WORD0_TYPE
216 | FATTR4_WORD0_CHANGE
217 | FATTR4_WORD0_FILEID,
218 };
219
220 const u32 nfs4_statfs_bitmap[3] = {
221 FATTR4_WORD0_FILES_AVAIL
222 | FATTR4_WORD0_FILES_FREE
223 | FATTR4_WORD0_FILES_TOTAL,
224 FATTR4_WORD1_SPACE_AVAIL
225 | FATTR4_WORD1_SPACE_FREE
226 | FATTR4_WORD1_SPACE_TOTAL
227 };
228
229 const u32 nfs4_pathconf_bitmap[3] = {
230 FATTR4_WORD0_MAXLINK
231 | FATTR4_WORD0_MAXNAME,
232 0
233 };
234
235 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
236 | FATTR4_WORD0_MAXREAD
237 | FATTR4_WORD0_MAXWRITE
238 | FATTR4_WORD0_LEASE_TIME,
239 FATTR4_WORD1_TIME_DELTA
240 | FATTR4_WORD1_FS_LAYOUT_TYPES,
241 FATTR4_WORD2_LAYOUT_BLKSIZE
242 };
243
244 const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261 };
262
263 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265 {
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315 }
316
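/*
 * Return the current retry delay and double *timeout for the next call:
 * an exponential backoff clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX].
 * A NULL timeout pointer simply yields the maximum delay.
 */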
317 static long nfs4_update_delay(long *timeout)
318 {
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329 }
330
331 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332 {
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342 }
343
344 /* This is the error handling routine for processes that are allowed
345 * to sleep.
346 */
347 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
348 {
349 struct nfs_client *clp = server->nfs_client;
350 struct nfs4_state *state = exception->state;
351 struct inode *inode = exception->inode;
352 int ret = errorcode;
353
354 exception->retry = 0;
355 switch(errorcode) {
356 case 0:
357 return 0;
358 case -NFS4ERR_OPENMODE:
359 case -NFS4ERR_DELEG_REVOKED:
360 case -NFS4ERR_ADMIN_REVOKED:
361 case -NFS4ERR_BAD_STATEID:
362 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
363 nfs4_inode_return_delegation(inode);
364 exception->retry = 1;
365 return 0;
366 }
367 if (state == NULL)
368 break;
369 ret = nfs4_schedule_stateid_recovery(server, state);
370 if (ret < 0)
371 break;
372 goto wait_on_recovery;
373 case -NFS4ERR_EXPIRED:
374 if (state != NULL) {
375 ret = nfs4_schedule_stateid_recovery(server, state);
376 if (ret < 0)
377 break;
378 }
379 case -NFS4ERR_STALE_STATEID:
380 case -NFS4ERR_STALE_CLIENTID:
381 nfs4_schedule_lease_recovery(clp);
382 goto wait_on_recovery;
383 case -NFS4ERR_MOVED:
384 ret = nfs4_schedule_migration_recovery(server);
385 if (ret < 0)
386 break;
387 goto wait_on_recovery;
388 case -NFS4ERR_LEASE_MOVED:
389 nfs4_schedule_lease_moved_recovery(clp);
390 goto wait_on_recovery;
391 #if defined(CONFIG_NFS_V4_1)
392 case -NFS4ERR_BADSESSION:
393 case -NFS4ERR_BADSLOT:
394 case -NFS4ERR_BAD_HIGH_SLOT:
395 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
396 case -NFS4ERR_DEADSESSION:
397 case -NFS4ERR_SEQ_FALSE_RETRY:
398 case -NFS4ERR_SEQ_MISORDERED:
399 dprintk("%s ERROR: %d Reset session\n", __func__,
400 errorcode);
401 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
402 goto wait_on_recovery;
403 #endif /* defined(CONFIG_NFS_V4_1) */
404 case -NFS4ERR_FILE_OPEN:
405 if (exception->timeout > HZ) {
406 /* We have retried a decent amount, time to
407 * fail
408 */
409 ret = -EBUSY;
410 break;
411 }
412 case -NFS4ERR_GRACE:
413 case -NFS4ERR_DELAY:
414 ret = nfs4_delay(server->client, &exception->timeout);
415 if (ret != 0)
416 break;
417 case -NFS4ERR_RETRY_UNCACHED_REP:
418 case -NFS4ERR_OLD_STATEID:
419 exception->retry = 1;
420 break;
421 case -NFS4ERR_BADOWNER:
422 /* The following works around a Linux server bug! */
423 case -NFS4ERR_BADNAME:
424 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
425 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
426 exception->retry = 1;
427 printk(KERN_WARNING "NFS: v4 server %s "
428 "does not accept raw "
429 "uid/gids. "
430 "Reenabling the idmapper.\n",
431 server->nfs_client->cl_hostname);
432 }
433 }
434 /* We failed to handle the error */
435 return nfs4_map_errors(ret);
436 wait_on_recovery:
437 ret = nfs4_wait_clnt_recover(clp);
438 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
439 return -EIO;
440 if (ret == 0)
441 exception->retry = 1;
442 return ret;
443 }
444
445 /*
446 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
447 * or 'false' otherwise.
448 */
449 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
450 {
451 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
452
453 if (flavor == RPC_AUTH_GSS_KRB5I ||
454 flavor == RPC_AUTH_GSS_KRB5P)
455 return true;
456
457 return false;
458 }
459
460 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
461 {
462 spin_lock(&clp->cl_lock);
463 if (time_before(clp->cl_last_renewal,timestamp))
464 clp->cl_last_renewal = timestamp;
465 spin_unlock(&clp->cl_lock);
466 }
467
468 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469 {
470 do_renew_lease(server->nfs_client, timestamp);
471 }
472
473 struct nfs4_call_sync_data {
474 const struct nfs_server *seq_server;
475 struct nfs4_sequence_args *seq_args;
476 struct nfs4_sequence_res *seq_res;
477 };
478
479 void nfs4_init_sequence(struct nfs4_sequence_args *args,
480 struct nfs4_sequence_res *res, int cache_reply)
481 {
482 args->sa_slot = NULL;
483 args->sa_cache_this = cache_reply;
484 args->sa_privileged = 0;
485
486 res->sr_slot = NULL;
487 }
488
489 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
490 {
491 args->sa_privileged = 1;
492 }
493
494 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
495 struct nfs4_sequence_args *args,
496 struct nfs4_sequence_res *res,
497 struct rpc_task *task)
498 {
499 struct nfs4_slot *slot;
500
501 /* slot already allocated? */
502 if (res->sr_slot != NULL)
503 goto out_start;
504
505 spin_lock(&tbl->slot_tbl_lock);
506 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
507 goto out_sleep;
508
509 slot = nfs4_alloc_slot(tbl);
510 if (IS_ERR(slot)) {
511 if (slot == ERR_PTR(-ENOMEM))
512 task->tk_timeout = HZ >> 2;
513 goto out_sleep;
514 }
515 spin_unlock(&tbl->slot_tbl_lock);
516
517 args->sa_slot = slot;
518 res->sr_slot = slot;
519
520 out_start:
521 rpc_call_start(task);
522 return 0;
523
524 out_sleep:
525 if (args->sa_privileged)
526 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
527 NULL, RPC_PRIORITY_PRIVILEGED);
528 else
529 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
530 spin_unlock(&tbl->slot_tbl_lock);
531 return -EAGAIN;
532 }
533 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
534
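/*
 * NFSv4.0 has no SEQUENCE operation, so completing a "sequence" only
 * requires handing the slot to a waiting task (or returning it to the
 * table) and clearing res->sr_slot.
 */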
535 static int nfs40_sequence_done(struct rpc_task *task,
536 struct nfs4_sequence_res *res)
537 {
538 struct nfs4_slot *slot = res->sr_slot;
539 struct nfs4_slot_table *tbl;
540
541 if (slot == NULL)
542 goto out;
543
544 tbl = slot->table;
545 spin_lock(&tbl->slot_tbl_lock);
546 if (!nfs41_wake_and_assign_slot(tbl, slot))
547 nfs4_free_slot(tbl, slot);
548 spin_unlock(&tbl->slot_tbl_lock);
549
550 res->sr_slot = NULL;
551 out:
552 return 1;
553 }
554
555 #if defined(CONFIG_NFS_V4_1)
556
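/*
 * Release the session slot held by a completed request.  The server is
 * only notified of a lowered highest-in-use slotid once the slot table
 * has drained completely and the previously advertised value exceeded
 * target_highest_slotid.
 */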
557 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
558 {
559 struct nfs4_session *session;
560 struct nfs4_slot_table *tbl;
561 struct nfs4_slot *slot = res->sr_slot;
562 bool send_new_highest_used_slotid = false;
563
564 tbl = slot->table;
565 session = tbl->session;
566
567 spin_lock(&tbl->slot_tbl_lock);
568 /* Be nice to the server: try to ensure that the last transmitted
569 * value for highest_user_slotid <= target_highest_slotid
570 */
571 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
572 send_new_highest_used_slotid = true;
573
574 if (nfs41_wake_and_assign_slot(tbl, slot)) {
575 send_new_highest_used_slotid = false;
576 goto out_unlock;
577 }
578 nfs4_free_slot(tbl, slot);
579
580 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
581 send_new_highest_used_slotid = false;
582 out_unlock:
583 spin_unlock(&tbl->slot_tbl_lock);
584 res->sr_slot = NULL;
585 if (send_new_highest_used_slotid)
586 nfs41_server_notify_highest_slotid_update(session->clp);
587 }
588
589 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
590 {
591 struct nfs4_session *session;
592 struct nfs4_slot *slot = res->sr_slot;
593 struct nfs_client *clp;
594 bool interrupted = false;
595 int ret = 1;
596
597 if (slot == NULL)
598 goto out_noaction;
599 /* don't increment the sequence number if the task wasn't sent */
600 if (!RPC_WAS_SENT(task))
601 goto out;
602
603 session = slot->table->session;
604
605 if (slot->interrupted) {
606 slot->interrupted = 0;
607 interrupted = true;
608 }
609
610 trace_nfs4_sequence_done(session, res);
611 /* Check the SEQUENCE operation status */
612 switch (res->sr_status) {
613 case 0:
614 /* Update the slot's sequence and clientid lease timer */
615 ++slot->seq_nr;
616 clp = session->clp;
617 do_renew_lease(clp, res->sr_timestamp);
618 /* Check sequence flags */
619 if (res->sr_status_flags != 0)
620 nfs4_schedule_lease_recovery(clp);
621 nfs41_update_target_slotid(slot->table, slot, res);
622 break;
623 case 1:
624 /*
625 * sr_status remains 1 if an RPC level error occurred.
626 * The server may or may not have processed the sequence
627 * operation.
628 * Mark the slot as having hosted an interrupted RPC call.
629 */
630 slot->interrupted = 1;
631 goto out;
632 case -NFS4ERR_DELAY:
633 /* The server detected a resend of the RPC call and
634 * returned NFS4ERR_DELAY as per Section 2.10.6.2
635 * of RFC5661.
636 */
637 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
638 __func__,
639 slot->slot_nr,
640 slot->seq_nr);
641 goto out_retry;
642 case -NFS4ERR_BADSLOT:
643 /*
644 * The slot id we used was probably retired. Try again
645 * using a different slot id.
646 */
647 goto retry_nowait;
648 case -NFS4ERR_SEQ_MISORDERED:
649 /*
650 * Was the last operation on this sequence interrupted?
651 * If so, retry after bumping the sequence number.
652 */
653 if (interrupted) {
654 ++slot->seq_nr;
655 goto retry_nowait;
656 }
657 /*
658 * Could this slot have been previously retired?
659 * If so, then the server may be expecting seq_nr = 1!
660 */
661 if (slot->seq_nr != 1) {
662 slot->seq_nr = 1;
663 goto retry_nowait;
664 }
665 break;
666 case -NFS4ERR_SEQ_FALSE_RETRY:
667 ++slot->seq_nr;
668 goto retry_nowait;
669 default:
670 /* Just update the slot sequence no. */
671 ++slot->seq_nr;
672 }
673 out:
674 /* The session may be reset by one of the error handlers. */
675 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
676 nfs41_sequence_free_slot(res);
677 out_noaction:
678 return ret;
679 retry_nowait:
680 if (rpc_restart_call_prepare(task)) {
681 task->tk_status = 0;
682 ret = 0;
683 }
684 goto out;
685 out_retry:
686 if (!rpc_restart_call(task))
687 goto out;
688 rpc_delay(task, NFS4_POLL_RETRY_MAX);
689 return 0;
690 }
691 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
692
693 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
694 {
695 if (res->sr_slot == NULL)
696 return 1;
697 if (!res->sr_slot->table->session)
698 return nfs40_sequence_done(task, res);
699 return nfs41_sequence_done(task, res);
700 }
701 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
702
703 int nfs41_setup_sequence(struct nfs4_session *session,
704 struct nfs4_sequence_args *args,
705 struct nfs4_sequence_res *res,
706 struct rpc_task *task)
707 {
708 struct nfs4_slot *slot;
709 struct nfs4_slot_table *tbl;
710
711 dprintk("--> %s\n", __func__);
712 /* slot already allocated? */
713 if (res->sr_slot != NULL)
714 goto out_success;
715
716 tbl = &session->fc_slot_table;
717
718 task->tk_timeout = 0;
719
720 spin_lock(&tbl->slot_tbl_lock);
721 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
722 !args->sa_privileged) {
723 /* The state manager will wait until the slot table is empty */
724 dprintk("%s session is draining\n", __func__);
725 goto out_sleep;
726 }
727
728 slot = nfs4_alloc_slot(tbl);
729 if (IS_ERR(slot)) {
730 /* If out of memory, try again in 1/4 second */
731 if (slot == ERR_PTR(-ENOMEM))
732 task->tk_timeout = HZ >> 2;
733 dprintk("<-- %s: no free slots\n", __func__);
734 goto out_sleep;
735 }
736 spin_unlock(&tbl->slot_tbl_lock);
737
738 args->sa_slot = slot;
739
740 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
741 slot->slot_nr, slot->seq_nr);
742
743 res->sr_slot = slot;
744 res->sr_timestamp = jiffies;
745 res->sr_status_flags = 0;
746 /*
747 * sr_status is only set in decode_sequence, and so will remain
748 * set to 1 if an rpc level failure occurs.
749 */
750 res->sr_status = 1;
751 trace_nfs4_setup_sequence(session, args);
752 out_success:
753 rpc_call_start(task);
754 return 0;
755 out_sleep:
756 /* Privileged tasks are queued with top priority */
757 if (args->sa_privileged)
758 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
759 NULL, RPC_PRIORITY_PRIVILEGED);
760 else
761 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
762 spin_unlock(&tbl->slot_tbl_lock);
763 return -EAGAIN;
764 }
765 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
766
767 static int nfs4_setup_sequence(const struct nfs_server *server,
768 struct nfs4_sequence_args *args,
769 struct nfs4_sequence_res *res,
770 struct rpc_task *task)
771 {
772 struct nfs4_session *session = nfs4_get_session(server);
773 int ret = 0;
774
775 if (!session)
776 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
777 args, res, task);
778
779 dprintk("--> %s clp %p session %p sr_slot %u\n",
780 __func__, session->clp, session, res->sr_slot ?
781 res->sr_slot->slot_nr : NFS4_NO_SLOT);
782
783 ret = nfs41_setup_sequence(session, args, res, task);
784
785 dprintk("<-- %s status=%d\n", __func__, ret);
786 return ret;
787 }
788
789 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
790 {
791 struct nfs4_call_sync_data *data = calldata;
792 struct nfs4_session *session = nfs4_get_session(data->seq_server);
793
794 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
795
796 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
797 }
798
799 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
800 {
801 struct nfs4_call_sync_data *data = calldata;
802
803 nfs41_sequence_done(task, data->seq_res);
804 }
805
806 static const struct rpc_call_ops nfs41_call_sync_ops = {
807 .rpc_call_prepare = nfs41_call_sync_prepare,
808 .rpc_call_done = nfs41_call_sync_done,
809 };
810
811 #else /* !CONFIG_NFS_V4_1 */
812
813 static int nfs4_setup_sequence(const struct nfs_server *server,
814 struct nfs4_sequence_args *args,
815 struct nfs4_sequence_res *res,
816 struct rpc_task *task)
817 {
818 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
819 args, res, task);
820 }
821
822 int nfs4_sequence_done(struct rpc_task *task,
823 struct nfs4_sequence_res *res)
824 {
825 return nfs40_sequence_done(task, res);
826 }
827 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
828
829 #endif /* !CONFIG_NFS_V4_1 */
830
831 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
832 {
833 struct nfs4_call_sync_data *data = calldata;
834 nfs4_setup_sequence(data->seq_server,
835 data->seq_args, data->seq_res, task);
836 }
837
838 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
839 {
840 struct nfs4_call_sync_data *data = calldata;
841 nfs4_sequence_done(task, data->seq_res);
842 }
843
844 static const struct rpc_call_ops nfs40_call_sync_ops = {
845 .rpc_call_prepare = nfs40_call_sync_prepare,
846 .rpc_call_done = nfs40_call_sync_done,
847 };
848
849 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
850 struct nfs_server *server,
851 struct rpc_message *msg,
852 struct nfs4_sequence_args *args,
853 struct nfs4_sequence_res *res)
854 {
855 int ret;
856 struct rpc_task *task;
857 struct nfs_client *clp = server->nfs_client;
858 struct nfs4_call_sync_data data = {
859 .seq_server = server,
860 .seq_args = args,
861 .seq_res = res,
862 };
863 struct rpc_task_setup task_setup = {
864 .rpc_client = clnt,
865 .rpc_message = msg,
866 .callback_ops = clp->cl_mvops->call_sync_ops,
867 .callback_data = &data
868 };
869
870 task = rpc_run_task(&task_setup);
871 if (IS_ERR(task))
872 ret = PTR_ERR(task);
873 else {
874 ret = task->tk_status;
875 rpc_put_task(task);
876 }
877 return ret;
878 }
879
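/*
 * Issue a synchronous NFSv4 call, letting the minor-version specific
 * call_sync_ops supply the SEQUENCE (or NFSv4.0 slot) handling.
 */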
880 int nfs4_call_sync(struct rpc_clnt *clnt,
881 struct nfs_server *server,
882 struct rpc_message *msg,
883 struct nfs4_sequence_args *args,
884 struct nfs4_sequence_res *res,
885 int cache_reply)
886 {
887 nfs4_init_sequence(args, res, cache_reply);
888 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
889 }
890
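/*
 * Apply the change_info4 returned by a directory-modifying operation:
 * invalidate the directory's attribute and data caches, and force a
 * lookup revalidation unless the update was atomic and the pre-op
 * change attribute matches our cached i_version.
 */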
891 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
892 {
893 struct nfs_inode *nfsi = NFS_I(dir);
894
895 spin_lock(&dir->i_lock);
896 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
897 if (!cinfo->atomic || cinfo->before != dir->i_version)
898 nfs_force_lookup_revalidate(dir);
899 dir->i_version = cinfo->after;
900 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
901 nfs_fscache_invalidate(dir);
902 spin_unlock(&dir->i_lock);
903 }
904
905 struct nfs4_opendata {
906 struct kref kref;
907 struct nfs_openargs o_arg;
908 struct nfs_openres o_res;
909 struct nfs_open_confirmargs c_arg;
910 struct nfs_open_confirmres c_res;
911 struct nfs4_string owner_name;
912 struct nfs4_string group_name;
913 struct nfs_fattr f_attr;
914 struct nfs4_label *f_label;
915 struct dentry *dir;
916 struct dentry *dentry;
917 struct nfs4_state_owner *owner;
918 struct nfs4_state *state;
919 struct iattr attrs;
920 unsigned long timestamp;
921 unsigned int rpc_done : 1;
922 unsigned int file_created : 1;
923 unsigned int is_recover : 1;
924 int rpc_status;
925 int cancelled;
926 };
927
928 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
929 int err, struct nfs4_exception *exception)
930 {
931 if (err != -EINVAL)
932 return false;
933 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
934 return false;
935 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
936 exception->retry = 1;
937 return true;
938 }
939
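/*
 * Translate the VFS open mode into NFSv4 share_access bits.  Servers
 * that support the v4.1 OPEN semantics are additionally asked not to
 * hand out a delegation when the file is opened O_DIRECT.
 */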
940 static u32
941 nfs4_map_atomic_open_share(struct nfs_server *server,
942 fmode_t fmode, int openflags)
943 {
944 u32 res = 0;
945
946 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
947 case FMODE_READ:
948 res = NFS4_SHARE_ACCESS_READ;
949 break;
950 case FMODE_WRITE:
951 res = NFS4_SHARE_ACCESS_WRITE;
952 break;
953 case FMODE_READ|FMODE_WRITE:
954 res = NFS4_SHARE_ACCESS_BOTH;
955 }
956 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
957 goto out;
958 /* Want no delegation if we're using O_DIRECT */
959 if (openflags & O_DIRECT)
960 res |= NFS4_SHARE_WANT_NO_DELEG;
961 out:
962 return res;
963 }
964
965 static enum open_claim_type4
966 nfs4_map_atomic_open_claim(struct nfs_server *server,
967 enum open_claim_type4 claim)
968 {
969 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
970 return claim;
971 switch (claim) {
972 default:
973 return claim;
974 case NFS4_OPEN_CLAIM_FH:
975 return NFS4_OPEN_CLAIM_NULL;
976 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
977 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
978 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
979 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
980 }
981 }
982
983 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
984 {
985 p->o_res.f_attr = &p->f_attr;
986 p->o_res.f_label = p->f_label;
987 p->o_res.seqid = p->o_arg.seqid;
988 p->c_res.seqid = p->c_arg.seqid;
989 p->o_res.server = p->o_arg.server;
990 p->o_res.access_request = p->o_arg.access;
991 nfs_fattr_init(&p->f_attr);
992 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
993 }
994
995 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
996 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
997 const struct iattr *attrs,
998 struct nfs4_label *label,
999 enum open_claim_type4 claim,
1000 gfp_t gfp_mask)
1001 {
1002 struct dentry *parent = dget_parent(dentry);
1003 struct inode *dir = d_inode(parent);
1004 struct nfs_server *server = NFS_SERVER(dir);
1005 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1006 struct nfs4_opendata *p;
1007
1008 p = kzalloc(sizeof(*p), gfp_mask);
1009 if (p == NULL)
1010 goto err;
1011
1012 p->f_label = nfs4_label_alloc(server, gfp_mask);
1013 if (IS_ERR(p->f_label))
1014 goto err_free_p;
1015
1016 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1017 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1018 if (IS_ERR(p->o_arg.seqid))
1019 goto err_free_label;
1020 nfs_sb_active(dentry->d_sb);
1021 p->dentry = dget(dentry);
1022 p->dir = parent;
1023 p->owner = sp;
1024 atomic_inc(&sp->so_count);
1025 p->o_arg.open_flags = flags;
1026 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1027 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1028 fmode, flags);
1029 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1030 * will return permission denied for all bits until close */
1031 if (!(flags & O_EXCL)) {
1032 /* ask server to check for all possible rights as results
1033 * are cached */
1034 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1035 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1036 }
1037 p->o_arg.clientid = server->nfs_client->cl_clientid;
1038 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1039 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1040 p->o_arg.name = &dentry->d_name;
1041 p->o_arg.server = server;
1042 p->o_arg.bitmask = nfs4_bitmask(server, label);
1043 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1044 p->o_arg.label = label;
1045 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1046 switch (p->o_arg.claim) {
1047 case NFS4_OPEN_CLAIM_NULL:
1048 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1049 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1050 p->o_arg.fh = NFS_FH(dir);
1051 break;
1052 case NFS4_OPEN_CLAIM_PREVIOUS:
1053 case NFS4_OPEN_CLAIM_FH:
1054 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1055 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1056 p->o_arg.fh = NFS_FH(d_inode(dentry));
1057 }
1058 if (attrs != NULL && attrs->ia_valid != 0) {
1059 __u32 verf[2];
1060
1061 p->o_arg.u.attrs = &p->attrs;
1062 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1063
1064 verf[0] = jiffies;
1065 verf[1] = current->pid;
1066 memcpy(p->o_arg.u.verifier.data, verf,
1067 sizeof(p->o_arg.u.verifier.data));
1068 }
1069 p->c_arg.fh = &p->o_res.fh;
1070 p->c_arg.stateid = &p->o_res.stateid;
1071 p->c_arg.seqid = p->o_arg.seqid;
1072 nfs4_init_opendata_res(p);
1073 kref_init(&p->kref);
1074 return p;
1075
1076 err_free_label:
1077 nfs4_label_free(p->f_label);
1078 err_free_p:
1079 kfree(p);
1080 err:
1081 dput(parent);
1082 return NULL;
1083 }
1084
1085 static void nfs4_opendata_free(struct kref *kref)
1086 {
1087 struct nfs4_opendata *p = container_of(kref,
1088 struct nfs4_opendata, kref);
1089 struct super_block *sb = p->dentry->d_sb;
1090
1091 nfs_free_seqid(p->o_arg.seqid);
1092 if (p->state != NULL)
1093 nfs4_put_open_state(p->state);
1094 nfs4_put_state_owner(p->owner);
1095
1096 nfs4_label_free(p->f_label);
1097
1098 dput(p->dir);
1099 dput(p->dentry);
1100 nfs_sb_deactive(sb);
1101 nfs_fattr_free_names(&p->f_attr);
1102 kfree(p->f_attr.mdsthreshold);
1103 kfree(p);
1104 }
1105
1106 static void nfs4_opendata_put(struct nfs4_opendata *p)
1107 {
1108 if (p != NULL)
1109 kref_put(&p->kref, nfs4_opendata_free);
1110 }
1111
1112 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1113 {
1114 int ret;
1115
1116 ret = rpc_wait_for_completion_task(task);
1117 return ret;
1118 }
1119
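/*
 * Return non-zero if open state we already hold covers the requested
 * mode, so that no OPEN RPC is needed.  O_EXCL and O_TRUNC opens must
 * always go to the server.
 */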
1120 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1121 {
1122 int ret = 0;
1123
1124 if (open_mode & (O_EXCL|O_TRUNC))
1125 goto out;
1126 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1127 case FMODE_READ:
1128 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1129 && state->n_rdonly != 0;
1130 break;
1131 case FMODE_WRITE:
1132 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1133 && state->n_wronly != 0;
1134 break;
1135 case FMODE_READ|FMODE_WRITE:
1136 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1137 && state->n_rdwr != 0;
1138 }
1139 out:
1140 return ret;
1141 }
1142
1143 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1144 {
1145 if (delegation == NULL)
1146 return 0;
1147 if ((delegation->type & fmode) != fmode)
1148 return 0;
1149 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1150 return 0;
1151 nfs_mark_delegation_referenced(delegation);
1152 return 1;
1153 }
1154
1155 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1156 {
1157 switch (fmode) {
1158 case FMODE_WRITE:
1159 state->n_wronly++;
1160 break;
1161 case FMODE_READ:
1162 state->n_rdonly++;
1163 break;
1164 case FMODE_READ|FMODE_WRITE:
1165 state->n_rdwr++;
1166 }
1167 nfs4_state_set_mode_locked(state, state->state | fmode);
1168 }
1169
1170 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1171 {
1172 struct nfs_client *clp = state->owner->so_server->nfs_client;
1173 bool need_recover = false;
1174
1175 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1176 need_recover = true;
1177 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1178 need_recover = true;
1179 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1180 need_recover = true;
1181 if (need_recover)
1182 nfs4_state_mark_reclaim_nograce(clp, state);
1183 }
1184
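/*
 * Decide whether an incoming open stateid should replace the cached one:
 * yes if we hold no open stateid yet, if the "other" field differs (the
 * server issued a fresh stateid, so any cached open modes need to be
 * recovered), or if the sequence id is newer.
 */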
1185 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1186 nfs4_stateid *stateid)
1187 {
1188 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1189 return true;
1190 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1191 nfs_test_and_clear_all_open_stateid(state);
1192 return true;
1193 }
1194 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1195 return true;
1196 return false;
1197 }
1198
1199 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1200 {
1201 if (state->n_wronly)
1202 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1203 if (state->n_rdonly)
1204 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1205 if (state->n_rdwr)
1206 set_bit(NFS_O_RDWR_STATE, &state->flags);
1207 }
1208
1209 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1210 nfs4_stateid *stateid, fmode_t fmode)
1211 {
1212 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1213 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1214 case FMODE_WRITE:
1215 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1216 break;
1217 case FMODE_READ:
1218 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1219 break;
1220 case 0:
1221 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1222 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1223 clear_bit(NFS_OPEN_STATE, &state->flags);
1224 }
1225 if (stateid == NULL)
1226 return;
1227 /* Handle races with OPEN */
1228 if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
1229 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1230 nfs_resync_open_stateid_locked(state);
1231 return;
1232 }
1233 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1234 nfs4_stateid_copy(&state->stateid, stateid);
1235 nfs4_stateid_copy(&state->open_stateid, stateid);
1236 }
1237
1238 static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1239 {
1240 write_seqlock(&state->seqlock);
1241 nfs_clear_open_stateid_locked(state, stateid, fmode);
1242 write_sequnlock(&state->seqlock);
1243 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1244 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1245 }
1246
1247 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1248 {
1249 switch (fmode) {
1250 case FMODE_READ:
1251 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1252 break;
1253 case FMODE_WRITE:
1254 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1255 break;
1256 case FMODE_READ|FMODE_WRITE:
1257 set_bit(NFS_O_RDWR_STATE, &state->flags);
1258 }
1259 if (!nfs_need_update_open_stateid(state, stateid))
1260 return;
1261 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1262 nfs4_stateid_copy(&state->stateid, stateid);
1263 nfs4_stateid_copy(&state->open_stateid, stateid);
1264 }
1265
1266 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1267 {
1268 /*
1269 * Protect the call to nfs4_state_set_mode_locked and
1270 * serialise the stateid update
1271 */
1272 write_seqlock(&state->seqlock);
1273 if (deleg_stateid != NULL) {
1274 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1275 set_bit(NFS_DELEGATED_STATE, &state->flags);
1276 }
1277 if (open_stateid != NULL)
1278 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1279 write_sequnlock(&state->seqlock);
1280 spin_lock(&state->owner->so_lock);
1281 update_open_stateflags(state, fmode);
1282 spin_unlock(&state->owner->so_lock);
1283 }
1284
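/*
 * Record the stateid returned by OPEN.  If we hold a delegation that
 * covers the open mode (and, when one is supplied, matches the
 * delegation stateid), state->stateid is taken from the delegation;
 * otherwise the open stateid is used directly.
 */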
1285 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1286 {
1287 struct nfs_inode *nfsi = NFS_I(state->inode);
1288 struct nfs_delegation *deleg_cur;
1289 int ret = 0;
1290
1291 fmode &= (FMODE_READ|FMODE_WRITE);
1292
1293 rcu_read_lock();
1294 deleg_cur = rcu_dereference(nfsi->delegation);
1295 if (deleg_cur == NULL)
1296 goto no_delegation;
1297
1298 spin_lock(&deleg_cur->lock);
1299 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1300 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1301 (deleg_cur->type & fmode) != fmode)
1302 goto no_delegation_unlock;
1303
1304 if (delegation == NULL)
1305 delegation = &deleg_cur->stateid;
1306 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1307 goto no_delegation_unlock;
1308
1309 nfs_mark_delegation_referenced(deleg_cur);
1310 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1311 ret = 1;
1312 no_delegation_unlock:
1313 spin_unlock(&deleg_cur->lock);
1314 no_delegation:
1315 rcu_read_unlock();
1316
1317 if (!ret && open_stateid != NULL) {
1318 __update_open_stateid(state, open_stateid, NULL, fmode);
1319 ret = 1;
1320 }
1321 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1322 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1323
1324 return ret;
1325 }
1326
1327 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1328 const nfs4_stateid *stateid)
1329 {
1330 struct nfs4_state *state = lsp->ls_state;
1331 bool ret = false;
1332
1333 spin_lock(&state->state_lock);
1334 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1335 goto out_noupdate;
1336 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1337 goto out_noupdate;
1338 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1339 ret = true;
1340 out_noupdate:
1341 spin_unlock(&state->state_lock);
1342 return ret;
1343 }
1344
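/*
 * If we hold a delegation whose type does not cover the requested open
 * mode, return it to the server before proceeding with the OPEN.
 */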
1345 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1346 {
1347 struct nfs_delegation *delegation;
1348
1349 rcu_read_lock();
1350 delegation = rcu_dereference(NFS_I(inode)->delegation);
1351 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1352 rcu_read_unlock();
1353 return;
1354 }
1355 rcu_read_unlock();
1356 nfs4_inode_return_delegation(inode);
1357 }
1358
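/*
 * Try to satisfy the open without sending an OPEN RPC, either from open
 * state we already hold or by claiming a matching delegation.  Returns
 * the state with an extra reference on success, otherwise an ERR_PTR
 * (usually -EAGAIN, meaning a real OPEN is required).
 */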
1359 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1360 {
1361 struct nfs4_state *state = opendata->state;
1362 struct nfs_inode *nfsi = NFS_I(state->inode);
1363 struct nfs_delegation *delegation;
1364 int open_mode = opendata->o_arg.open_flags;
1365 fmode_t fmode = opendata->o_arg.fmode;
1366 nfs4_stateid stateid;
1367 int ret = -EAGAIN;
1368
1369 for (;;) {
1370 spin_lock(&state->owner->so_lock);
1371 if (can_open_cached(state, fmode, open_mode)) {
1372 update_open_stateflags(state, fmode);
1373 spin_unlock(&state->owner->so_lock);
1374 goto out_return_state;
1375 }
1376 spin_unlock(&state->owner->so_lock);
1377 rcu_read_lock();
1378 delegation = rcu_dereference(nfsi->delegation);
1379 if (!can_open_delegated(delegation, fmode)) {
1380 rcu_read_unlock();
1381 break;
1382 }
1383 /* Save the delegation */
1384 nfs4_stateid_copy(&stateid, &delegation->stateid);
1385 rcu_read_unlock();
1386 nfs_release_seqid(opendata->o_arg.seqid);
1387 if (!opendata->is_recover) {
1388 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1389 if (ret != 0)
1390 goto out;
1391 }
1392 ret = -EAGAIN;
1393
1394 /* Try to update the stateid using the delegation */
1395 if (update_open_stateid(state, NULL, &stateid, fmode))
1396 goto out_return_state;
1397 }
1398 out:
1399 return ERR_PTR(ret);
1400 out_return_state:
1401 atomic_inc(&state->count);
1402 return state;
1403 }
1404
1405 static void
1406 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1407 {
1408 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1409 struct nfs_delegation *delegation;
1410 int delegation_flags = 0;
1411
1412 rcu_read_lock();
1413 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1414 if (delegation)
1415 delegation_flags = delegation->flags;
1416 rcu_read_unlock();
1417 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1418 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1419 "returning a delegation for "
1420 "OPEN(CLAIM_DELEGATE_CUR)\n",
1421 clp->cl_hostname);
1422 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1423 nfs_inode_set_delegation(state->inode,
1424 data->owner->so_cred,
1425 &data->o_res);
1426 else
1427 nfs_inode_reclaim_delegation(state->inode,
1428 data->owner->so_cred,
1429 &data->o_res);
1430 }
1431
1432 /*
1433 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1434 * and update the nfs4_state.
1435 */
1436 static struct nfs4_state *
1437 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1438 {
1439 struct inode *inode = data->state->inode;
1440 struct nfs4_state *state = data->state;
1441 int ret;
1442
1443 if (!data->rpc_done) {
1444 if (data->rpc_status) {
1445 ret = data->rpc_status;
1446 goto err;
1447 }
1448 /* cached opens have already been processed */
1449 goto update;
1450 }
1451
1452 ret = nfs_refresh_inode(inode, &data->f_attr);
1453 if (ret)
1454 goto err;
1455
1456 if (data->o_res.delegation_type != 0)
1457 nfs4_opendata_check_deleg(data, state);
1458 update:
1459 update_open_stateid(state, &data->o_res.stateid, NULL,
1460 data->o_arg.fmode);
1461 atomic_inc(&state->count);
1462
1463 return state;
1464 err:
1465 return ERR_PTR(ret);
1466
1467 }
1468
1469 static struct nfs4_state *
1470 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1471 {
1472 struct inode *inode;
1473 struct nfs4_state *state = NULL;
1474 int ret;
1475
1476 if (!data->rpc_done) {
1477 state = nfs4_try_open_cached(data);
1478 goto out;
1479 }
1480
1481 ret = -EAGAIN;
1482 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1483 goto err;
1484 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1485 ret = PTR_ERR(inode);
1486 if (IS_ERR(inode))
1487 goto err;
1488 ret = -ENOMEM;
1489 state = nfs4_get_open_state(inode, data->owner);
1490 if (state == NULL)
1491 goto err_put_inode;
1492 if (data->o_res.delegation_type != 0)
1493 nfs4_opendata_check_deleg(data, state);
1494 update_open_stateid(state, &data->o_res.stateid, NULL,
1495 data->o_arg.fmode);
1496 iput(inode);
1497 out:
1498 nfs_release_seqid(data->o_arg.seqid);
1499 return state;
1500 err_put_inode:
1501 iput(inode);
1502 err:
1503 return ERR_PTR(ret);
1504 }
1505
1506 static struct nfs4_state *
1507 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1508 {
1509 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1510 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1511 return _nfs4_opendata_to_nfs4_state(data);
1512 }
1513
1514 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1515 {
1516 struct nfs_inode *nfsi = NFS_I(state->inode);
1517 struct nfs_open_context *ctx;
1518
1519 spin_lock(&state->inode->i_lock);
1520 list_for_each_entry(ctx, &nfsi->open_files, list) {
1521 if (ctx->state != state)
1522 continue;
1523 get_nfs_open_context(ctx);
1524 spin_unlock(&state->inode->i_lock);
1525 return ctx;
1526 }
1527 spin_unlock(&state->inode->i_lock);
1528 return ERR_PTR(-ENOENT);
1529 }
1530
1531 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1532 struct nfs4_state *state, enum open_claim_type4 claim)
1533 {
1534 struct nfs4_opendata *opendata;
1535
1536 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1537 NULL, NULL, claim, GFP_NOFS);
1538 if (opendata == NULL)
1539 return ERR_PTR(-ENOMEM);
1540 opendata->state = state;
1541 atomic_inc(&state->count);
1542 return opendata;
1543 }
1544
1545 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1546 {
1547 struct nfs4_state *newstate;
1548 int ret;
1549
1550 if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
1551 opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
1552 (opendata->o_arg.u.delegation_type & fmode) != fmode)
1553 /* This mode can't have been delegated, so we must have
1554 * a valid open_stateid to cover it - not need to reclaim.
1555 */
1556 return 0;
1557 opendata->o_arg.open_flags = 0;
1558 opendata->o_arg.fmode = fmode;
1559 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1560 NFS_SB(opendata->dentry->d_sb),
1561 fmode, 0);
1562 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1563 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1564 nfs4_init_opendata_res(opendata);
1565 ret = _nfs4_recover_proc_open(opendata);
1566 if (ret != 0)
1567 return ret;
1568 newstate = nfs4_opendata_to_nfs4_state(opendata);
1569 if (IS_ERR(newstate))
1570 return PTR_ERR(newstate);
1571 nfs4_close_state(newstate, fmode);
1572 *res = newstate;
1573 return 0;
1574 }
1575
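/*
 * Re-establish every open mode currently in use (read/write, write-only
 * and read-only) by replaying OPENs, then make sure state->stateid is in
 * sync with the recovered open stateid.
 */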
1576 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1577 {
1578 struct nfs4_state *newstate;
1579 int ret;
1580
1581 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1582 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1583 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1584 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1585 /* memory barrier prior to reading state->n_* */
1586 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1587 clear_bit(NFS_OPEN_STATE, &state->flags);
1588 smp_rmb();
1589 if (state->n_rdwr != 0) {
1590 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1591 if (ret != 0)
1592 return ret;
1593 if (newstate != state)
1594 return -ESTALE;
1595 }
1596 if (state->n_wronly != 0) {
1597 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1598 if (ret != 0)
1599 return ret;
1600 if (newstate != state)
1601 return -ESTALE;
1602 }
1603 if (state->n_rdonly != 0) {
1604 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1605 if (ret != 0)
1606 return ret;
1607 if (newstate != state)
1608 return -ESTALE;
1609 }
1610 /*
1611 * We may have performed cached opens for all three recoveries.
1612 * Check if we need to update the current stateid.
1613 */
1614 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1615 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1616 write_seqlock(&state->seqlock);
1617 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1618 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1619 write_sequnlock(&state->seqlock);
1620 }
1621 return 0;
1622 }
1623
1624 /*
1625 * OPEN_RECLAIM:
1626 * reclaim state on the server after a reboot.
1627 */
1628 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1629 {
1630 struct nfs_delegation *delegation;
1631 struct nfs4_opendata *opendata;
1632 fmode_t delegation_type = 0;
1633 int status;
1634
1635 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1636 NFS4_OPEN_CLAIM_PREVIOUS);
1637 if (IS_ERR(opendata))
1638 return PTR_ERR(opendata);
1639 rcu_read_lock();
1640 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1641 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1642 delegation_type = delegation->type;
1643 rcu_read_unlock();
1644 opendata->o_arg.u.delegation_type = delegation_type;
1645 status = nfs4_open_recover(opendata, state);
1646 nfs4_opendata_put(opendata);
1647 return status;
1648 }
1649
1650 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1651 {
1652 struct nfs_server *server = NFS_SERVER(state->inode);
1653 struct nfs4_exception exception = { };
1654 int err;
1655 do {
1656 err = _nfs4_do_open_reclaim(ctx, state);
1657 trace_nfs4_open_reclaim(ctx, 0, err);
1658 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1659 continue;
1660 if (err != -NFS4ERR_DELAY)
1661 break;
1662 nfs4_handle_exception(server, err, &exception);
1663 } while (exception.retry);
1664 return err;
1665 }
1666
1667 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1668 {
1669 struct nfs_open_context *ctx;
1670 int ret;
1671
1672 ctx = nfs4_state_find_open_context(state);
1673 if (IS_ERR(ctx))
1674 return -EAGAIN;
1675 ret = nfs4_do_open_reclaim(ctx, state);
1676 put_nfs_open_context(ctx);
1677 return ret;
1678 }
1679
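/*
 * Map an error encountered while reclaiming opens during a delegation
 * recall onto the appropriate recovery action.  A return value of
 * -EAGAIN tells the caller to retry once recovery has completed.
 */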
1680 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1681 {
1682 switch (err) {
1683 default:
1684 printk(KERN_ERR "NFS: %s: unhandled error "
1685 "%d.\n", __func__, err);
1686 case 0:
1687 case -ENOENT:
1688 case -EAGAIN:
1689 case -ESTALE:
1690 break;
1691 case -NFS4ERR_BADSESSION:
1692 case -NFS4ERR_BADSLOT:
1693 case -NFS4ERR_BAD_HIGH_SLOT:
1694 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1695 case -NFS4ERR_DEADSESSION:
1696 set_bit(NFS_DELEGATED_STATE, &state->flags);
1697 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1698 return -EAGAIN;
1699 case -NFS4ERR_STALE_CLIENTID:
1700 case -NFS4ERR_STALE_STATEID:
1701 set_bit(NFS_DELEGATED_STATE, &state->flags);
1702 case -NFS4ERR_EXPIRED:
1703 /* Don't recall a delegation if it was lost */
1704 nfs4_schedule_lease_recovery(server->nfs_client);
1705 return -EAGAIN;
1706 case -NFS4ERR_MOVED:
1707 nfs4_schedule_migration_recovery(server);
1708 return -EAGAIN;
1709 case -NFS4ERR_LEASE_MOVED:
1710 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1711 return -EAGAIN;
1712 case -NFS4ERR_DELEG_REVOKED:
1713 case -NFS4ERR_ADMIN_REVOKED:
1714 case -NFS4ERR_BAD_STATEID:
1715 case -NFS4ERR_OPENMODE:
1716 nfs_inode_find_state_and_recover(state->inode,
1717 stateid);
1718 nfs4_schedule_stateid_recovery(server, state);
1719 return -EAGAIN;
1720 case -NFS4ERR_DELAY:
1721 case -NFS4ERR_GRACE:
1722 set_bit(NFS_DELEGATED_STATE, &state->flags);
1723 ssleep(1);
1724 return -EAGAIN;
1725 case -ENOMEM:
1726 case -NFS4ERR_DENIED:
1727 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1728 return 0;
1729 }
1730 return err;
1731 }
1732
1733 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1734 {
1735 struct nfs_server *server = NFS_SERVER(state->inode);
1736 struct nfs4_opendata *opendata;
1737 int err;
1738
1739 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1740 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1741 if (IS_ERR(opendata))
1742 return PTR_ERR(opendata);
1743 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1744 err = nfs4_open_recover(opendata, state);
1745 nfs4_opendata_put(opendata);
1746 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1747 }
1748
1749 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1750 {
1751 struct nfs4_opendata *data = calldata;
1752
1753 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1754 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1755 }
1756
1757 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1758 {
1759 struct nfs4_opendata *data = calldata;
1760
1761 nfs40_sequence_done(task, &data->c_res.seq_res);
1762
1763 data->rpc_status = task->tk_status;
1764 if (data->rpc_status == 0) {
1765 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1766 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1767 renew_lease(data->o_res.server, data->timestamp);
1768 data->rpc_done = 1;
1769 }
1770 }
1771
1772 static void nfs4_open_confirm_release(void *calldata)
1773 {
1774 struct nfs4_opendata *data = calldata;
1775 struct nfs4_state *state = NULL;
1776
1777 /* If this request hasn't been cancelled, do nothing */
1778 if (data->cancelled == 0)
1779 goto out_free;
1780 /* In case of error, no cleanup! */
1781 if (!data->rpc_done)
1782 goto out_free;
1783 state = nfs4_opendata_to_nfs4_state(data);
1784 if (!IS_ERR(state))
1785 nfs4_close_state(state, data->o_arg.fmode);
1786 out_free:
1787 nfs4_opendata_put(data);
1788 }
1789
1790 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1791 .rpc_call_prepare = nfs4_open_confirm_prepare,
1792 .rpc_call_done = nfs4_open_confirm_done,
1793 .rpc_release = nfs4_open_confirm_release,
1794 };
1795
1796 /*
1797 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1798 */
1799 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1800 {
1801 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1802 struct rpc_task *task;
1803 struct rpc_message msg = {
1804 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1805 .rpc_argp = &data->c_arg,
1806 .rpc_resp = &data->c_res,
1807 .rpc_cred = data->owner->so_cred,
1808 };
1809 struct rpc_task_setup task_setup_data = {
1810 .rpc_client = server->client,
1811 .rpc_message = &msg,
1812 .callback_ops = &nfs4_open_confirm_ops,
1813 .callback_data = data,
1814 .workqueue = nfsiod_workqueue,
1815 .flags = RPC_TASK_ASYNC,
1816 };
1817 int status;
1818
1819 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1820 kref_get(&data->kref);
1821 data->rpc_done = 0;
1822 data->rpc_status = 0;
1823 data->timestamp = jiffies;
1824 task = rpc_run_task(&task_setup_data);
1825 if (IS_ERR(task))
1826 return PTR_ERR(task);
1827 status = nfs4_wait_for_completion_rpc_task(task);
1828 if (status != 0) {
1829 data->cancelled = 1;
1830 smp_wmb();
1831 } else
1832 status = data->rpc_status;
1833 rpc_put_task(task);
1834 return status;
1835 }
1836
1837 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1838 {
1839 struct nfs4_opendata *data = calldata;
1840 struct nfs4_state_owner *sp = data->owner;
1841 struct nfs_client *clp = sp->so_server->nfs_client;
1842
1843 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1844 goto out_wait;
1845 /*
1846 * Check if we still need to send an OPEN call, or if we can use
1847 * a delegation instead.
1848 */
1849 if (data->state != NULL) {
1850 struct nfs_delegation *delegation;
1851
1852 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1853 goto out_no_action;
1854 rcu_read_lock();
1855 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1856 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1857 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
1858 can_open_delegated(delegation, data->o_arg.fmode))
1859 goto unlock_no_action;
1860 rcu_read_unlock();
1861 }
1862 /* Update client id. */
1863 data->o_arg.clientid = clp->cl_clientid;
1864 switch (data->o_arg.claim) {
1865 case NFS4_OPEN_CLAIM_PREVIOUS:
1866 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1867 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1868 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
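/* Fall through - these claim types also use OPEN_NOATTR */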
1869 case NFS4_OPEN_CLAIM_FH:
1870 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1871 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1872 }
1873 data->timestamp = jiffies;
1874 if (nfs4_setup_sequence(data->o_arg.server,
1875 &data->o_arg.seq_args,
1876 &data->o_res.seq_res,
1877 task) != 0)
1878 nfs_release_seqid(data->o_arg.seqid);
1879
1880 /* Set the create mode (note dependency on the session type) */
1881 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1882 if (data->o_arg.open_flags & O_EXCL) {
1883 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1884 if (nfs4_has_persistent_session(clp))
1885 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1886 else if (clp->cl_mvops->minor_version > 0)
1887 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1888 }
1889 return;
1890 unlock_no_action:
1891 rcu_read_unlock();
1892 out_no_action:
1893 task->tk_action = NULL;
1894 out_wait:
1895 nfs4_sequence_done(task, &data->o_res.seq_res);
1896 }
1897
1898 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1899 {
1900 struct nfs4_opendata *data = calldata;
1901
1902 data->rpc_status = task->tk_status;
1903
1904 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1905 return;
1906
1907 if (task->tk_status == 0) {
1908 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1909 switch (data->o_res.f_attr->mode & S_IFMT) {
1910 case S_IFREG:
1911 break;
1912 case S_IFLNK:
1913 data->rpc_status = -ELOOP;
1914 break;
1915 case S_IFDIR:
1916 data->rpc_status = -EISDIR;
1917 break;
1918 default:
1919 data->rpc_status = -ENOTDIR;
1920 }
1921 }
1922 renew_lease(data->o_res.server, data->timestamp);
1923 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1924 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1925 }
1926 data->rpc_done = 1;
1927 }
1928
1929 static void nfs4_open_release(void *calldata)
1930 {
1931 struct nfs4_opendata *data = calldata;
1932 struct nfs4_state *state = NULL;
1933
1934 /* If this request hasn't been cancelled, do nothing */
1935 if (data->cancelled == 0)
1936 goto out_free;
1937 /* In case of error, no cleanup! */
1938 if (data->rpc_status != 0 || !data->rpc_done)
1939 goto out_free;
1940 /* In case we need an open_confirm, no cleanup! */
1941 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1942 goto out_free;
1943 state = nfs4_opendata_to_nfs4_state(data);
1944 if (!IS_ERR(state))
1945 nfs4_close_state(state, data->o_arg.fmode);
1946 out_free:
1947 nfs4_opendata_put(data);
1948 }
1949
1950 static const struct rpc_call_ops nfs4_open_ops = {
1951 .rpc_call_prepare = nfs4_open_prepare,
1952 .rpc_call_done = nfs4_open_done,
1953 .rpc_release = nfs4_open_release,
1954 };
1955
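/*
 * Run the OPEN RPC asynchronously and wait for completion. Recovery opens
 * are given a privileged sequence slot. If the wait is interrupted, the
 * request is marked as cancelled so that nfs4_open_release() can clean up
 * any state the server may still have established.
 */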
1956 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1957 {
1958 struct inode *dir = d_inode(data->dir);
1959 struct nfs_server *server = NFS_SERVER(dir);
1960 struct nfs_openargs *o_arg = &data->o_arg;
1961 struct nfs_openres *o_res = &data->o_res;
1962 struct rpc_task *task;
1963 struct rpc_message msg = {
1964 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1965 .rpc_argp = o_arg,
1966 .rpc_resp = o_res,
1967 .rpc_cred = data->owner->so_cred,
1968 };
1969 struct rpc_task_setup task_setup_data = {
1970 .rpc_client = server->client,
1971 .rpc_message = &msg,
1972 .callback_ops = &nfs4_open_ops,
1973 .callback_data = data,
1974 .workqueue = nfsiod_workqueue,
1975 .flags = RPC_TASK_ASYNC,
1976 };
1977 int status;
1978
1979 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1980 kref_get(&data->kref);
1981 data->rpc_done = 0;
1982 data->rpc_status = 0;
1983 data->cancelled = 0;
1984 data->is_recover = 0;
1985 if (isrecover) {
1986 nfs4_set_sequence_privileged(&o_arg->seq_args);
1987 data->is_recover = 1;
1988 }
1989 task = rpc_run_task(&task_setup_data);
1990 if (IS_ERR(task))
1991 return PTR_ERR(task);
1992 status = nfs4_wait_for_completion_rpc_task(task);
1993 if (status != 0) {
1994 data->cancelled = 1;
1995 smp_wmb();
1996 } else
1997 status = data->rpc_status;
1998 rpc_put_task(task);
1999
2000 return status;
2001 }
2002
2003 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2004 {
2005 struct inode *dir = d_inode(data->dir);
2006 struct nfs_openres *o_res = &data->o_res;
2007 int status;
2008
2009 status = nfs4_run_open_task(data, 1);
2010 if (status != 0 || !data->rpc_done)
2011 return status;
2012
2013 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2014
2015 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2016 status = _nfs4_proc_open_confirm(data);
2017 if (status != 0)
2018 return status;
2019 }
2020
2021 return status;
2022 }
2023
2024 /*
2025 * Additional permission checks in order to distinguish between an
2026 * open for read, and an open for execute. This works around the
2027 * fact that NFSv4 OPEN treats read and execute permissions as being
2028 * the same.
2029 * Note that in the non-execute case, we want to turn off permission
2030 * checking if we just created a new file (POSIX open() semantics).
2031 */
2032 static int nfs4_opendata_access(struct rpc_cred *cred,
2033 struct nfs4_opendata *opendata,
2034 struct nfs4_state *state, fmode_t fmode,
2035 int openflags)
2036 {
2037 struct nfs_access_entry cache;
2038 u32 mask;
2039
2040 /* access call failed or for some reason the server doesn't
2041 * support any access modes -- defer access call until later */
2042 if (opendata->o_res.access_supported == 0)
2043 return 0;
2044
2045 mask = 0;
2046 /*
2047 * Use openflags to check for exec, because fmode won't
2048 * always have FMODE_EXEC set when a file is opened for exec.
2049 */
2050 if (openflags & __FMODE_EXEC) {
2051 /* ONLY check for exec rights */
2052 mask = MAY_EXEC;
2053 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2054 mask = MAY_READ;
2055
2056 cache.cred = cred;
2057 cache.jiffies = jiffies;
2058 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2059 nfs_access_add_cache(state->inode, &cache);
2060
2061 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2062 return 0;
2063
2064 /* even though OPEN succeeded, access is denied. Close the file */
2065 nfs4_close_state(state, fmode);
2066 return -EACCES;
2067 }
2068
2069 /*
2070 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2071 */
2072 static int _nfs4_proc_open(struct nfs4_opendata *data)
2073 {
2074 struct inode *dir = d_inode(data->dir);
2075 struct nfs_server *server = NFS_SERVER(dir);
2076 struct nfs_openargs *o_arg = &data->o_arg;
2077 struct nfs_openres *o_res = &data->o_res;
2078 int status;
2079
2080 status = nfs4_run_open_task(data, 0);
2081 if (!data->rpc_done)
2082 return status;
2083 if (status != 0) {
2084 if (status == -NFS4ERR_BADNAME &&
2085 !(o_arg->open_flags & O_CREAT))
2086 return -ENOENT;
2087 return status;
2088 }
2089
2090 nfs_fattr_map_and_free_names(server, &data->f_attr);
2091
2092 if (o_arg->open_flags & O_CREAT) {
2093 update_changeattr(dir, &o_res->cinfo);
2094 if (o_arg->open_flags & O_EXCL)
2095 data->file_created = 1;
2096 else if (o_res->cinfo.before != o_res->cinfo.after)
2097 data->file_created = 1;
2098 }
2099 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2100 server->caps &= ~NFS_CAP_POSIX_LOCK;
2101 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2102 status = _nfs4_proc_open_confirm(data);
2103 if (status != 0)
2104 return status;
2105 }
2106 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2107 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2108 return 0;
2109 }
2110
2111 static int nfs4_recover_expired_lease(struct nfs_server *server)
2112 {
2113 return nfs4_client_recover_expired_lease(server->nfs_client);
2114 }
2115
2116 /*
2117 * OPEN_EXPIRED:
2118 * reclaim state on the server after a network partition.
2119 * Assumes caller holds the appropriate lock
2120 */
2121 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2122 {
2123 struct nfs4_opendata *opendata;
2124 int ret;
2125
2126 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2127 NFS4_OPEN_CLAIM_FH);
2128 if (IS_ERR(opendata))
2129 return PTR_ERR(opendata);
2130 ret = nfs4_open_recover(opendata, state);
2131 if (ret == -ESTALE)
2132 d_drop(ctx->dentry);
2133 nfs4_opendata_put(opendata);
2134 return ret;
2135 }
2136
2137 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2138 {
2139 struct nfs_server *server = NFS_SERVER(state->inode);
2140 struct nfs4_exception exception = { };
2141 int err;
2142
2143 do {
2144 err = _nfs4_open_expired(ctx, state);
2145 trace_nfs4_open_expired(ctx, 0, err);
2146 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2147 continue;
2148 switch (err) {
2149 default:
2150 goto out;
2151 case -NFS4ERR_GRACE:
2152 case -NFS4ERR_DELAY:
2153 nfs4_handle_exception(server, err, &exception);
2154 err = 0;
2155 }
2156 } while (exception.retry);
2157 out:
2158 return err;
2159 }
2160
2161 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2162 {
2163 struct nfs_open_context *ctx;
2164 int ret;
2165
2166 ctx = nfs4_state_find_open_context(state);
2167 if (IS_ERR(ctx))
2168 return -EAGAIN;
2169 ret = nfs4_do_open_expired(ctx, state);
2170 put_nfs_open_context(ctx);
2171 return ret;
2172 }
2173
2174 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2175 {
2176 nfs_remove_bad_delegation(state->inode);
2177 write_seqlock(&state->seqlock);
2178 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2179 write_sequnlock(&state->seqlock);
2180 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2181 }
2182
2183 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2184 {
2185 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2186 nfs_finish_clear_delegation_stateid(state);
2187 }
2188
2189 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2190 {
2191 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2192 nfs40_clear_delegation_stateid(state);
2193 return nfs4_open_expired(sp, state);
2194 }
2195
2196 #if defined(CONFIG_NFS_V4_1)
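/**
 * nfs41_check_delegation_stateid - possibly free a delegation stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Uses TEST_STATEID to check whether the server still recognizes the
 * delegation stateid. If it does not, the stateid is freed (unless the
 * server reported it as unrecognized) and the bad delegation is cleared.
 */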
2197 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2198 {
2199 struct nfs_server *server = NFS_SERVER(state->inode);
2200 nfs4_stateid stateid;
2201 struct nfs_delegation *delegation;
2202 struct rpc_cred *cred;
2203 int status;
2204
2205 /* Get the delegation credential for use by test/free_stateid */
2206 rcu_read_lock();
2207 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2208 if (delegation == NULL) {
2209 rcu_read_unlock();
2210 return;
2211 }
2212
2213 nfs4_stateid_copy(&stateid, &delegation->stateid);
2214 cred = get_rpccred(delegation->cred);
2215 rcu_read_unlock();
2216 status = nfs41_test_stateid(server, &stateid, cred);
2217 trace_nfs4_test_delegation_stateid(state, NULL, status);
2218
2219 if (status != NFS_OK) {
2220 /* Free the stateid unless the server explicitly
2221 * informs us the stateid is unrecognized. */
2222 if (status != -NFS4ERR_BAD_STATEID)
2223 nfs41_free_stateid(server, &stateid, cred);
2224 nfs_finish_clear_delegation_stateid(state);
2225 }
2226
2227 put_rpccred(cred);
2228 }
2229
2230 /**
2231 * nfs41_check_open_stateid - possibly free an open stateid
2232 *
2233 * @state: NFSv4 state for an inode
2234 *
2235 * Returns NFS_OK if recovery for this stateid is now finished.
2236 * Otherwise a negative NFS4ERR value is returned.
2237 */
2238 static int nfs41_check_open_stateid(struct nfs4_state *state)
2239 {
2240 struct nfs_server *server = NFS_SERVER(state->inode);
2241 nfs4_stateid *stateid = &state->open_stateid;
2242 struct rpc_cred *cred = state->owner->so_cred;
2243 int status;
2244
2245 /* If a state reset has been done, test_stateid is unneeded */
2246 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2247 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2248 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2249 return -NFS4ERR_BAD_STATEID;
2250
2251 status = nfs41_test_stateid(server, stateid, cred);
2252 trace_nfs4_test_open_stateid(state, NULL, status);
2253 if (status != NFS_OK) {
2254 /* Free the stateid unless the server explicitly
2255 * informs us the stateid is unrecognized. */
2256 if (status != -NFS4ERR_BAD_STATEID)
2257 nfs41_free_stateid(server, stateid, cred);
2258
2259 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2260 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2261 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2262 clear_bit(NFS_OPEN_STATE, &state->flags);
2263 }
2264 return status;
2265 }
2266
2267 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2268 {
2269 int status;
2270
2271 nfs41_check_delegation_stateid(state);
2272 status = nfs41_check_open_stateid(state);
2273 if (status != NFS_OK)
2274 status = nfs4_open_expired(sp, state);
2275 return status;
2276 }
2277 #endif
2278
2279 /*
2280 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2281 * fields corresponding to attributes that were used to store the verifier.
2282 * Make sure we clobber those fields in the later setattr call.
2283 */
2284 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
2285 {
2286 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2287 !(sattr->ia_valid & ATTR_ATIME_SET))
2288 sattr->ia_valid |= ATTR_ATIME;
2289
2290 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2291 !(sattr->ia_valid & ATTR_MTIME_SET))
2292 sattr->ia_valid |= ATTR_MTIME;
2293 }
2294
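/*
 * Send the OPEN (and OPEN_CONFIRM, if the server requires it), turn the
 * result into a referenced nfs4_state, attach the opened inode to the
 * dentry and open context, and perform the deferred access check. The
 * state owner's reclaim seqcount is sampled so that a concurrent state
 * recovery can be detected and stateid recovery rescheduled.
 */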
2295 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2296 fmode_t fmode,
2297 int flags,
2298 struct nfs_open_context *ctx)
2299 {
2300 struct nfs4_state_owner *sp = opendata->owner;
2301 struct nfs_server *server = sp->so_server;
2302 struct dentry *dentry;
2303 struct nfs4_state *state;
2304 unsigned int seq;
2305 int ret;
2306
2307 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2308
2309 ret = _nfs4_proc_open(opendata);
2310 if (ret != 0)
2311 goto out;
2312
2313 state = nfs4_opendata_to_nfs4_state(opendata);
2314 ret = PTR_ERR(state);
2315 if (IS_ERR(state))
2316 goto out;
2317 if (server->caps & NFS_CAP_POSIX_LOCK)
2318 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2319
2320 dentry = opendata->dentry;
2321 if (d_really_is_negative(dentry)) {
2322 /* FIXME: Is this d_drop() ever needed? */
2323 d_drop(dentry);
2324 dentry = d_add_unique(dentry, igrab(state->inode));
2325 if (dentry == NULL) {
2326 dentry = opendata->dentry;
2327 } else if (dentry != ctx->dentry) {
2328 dput(ctx->dentry);
2329 ctx->dentry = dget(dentry);
2330 }
2331 nfs_set_verifier(dentry,
2332 nfs_save_change_attribute(d_inode(opendata->dir)));
2333 }
2334
2335 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2336 if (ret != 0)
2337 goto out;
2338
2339 ctx->state = state;
2340 if (d_inode(dentry) == state->inode) {
2341 nfs_inode_attach_open_context(ctx);
2342 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2343 nfs4_schedule_stateid_recovery(server, state);
2344 }
2345 out:
2346 return ret;
2347 }
2348
2349 /*
2350 * Returns a referenced nfs4_state
2351 */
2352 static int _nfs4_do_open(struct inode *dir,
2353 struct nfs_open_context *ctx,
2354 int flags,
2355 struct iattr *sattr,
2356 struct nfs4_label *label,
2357 int *opened)
2358 {
2359 struct nfs4_state_owner *sp;
2360 struct nfs4_state *state = NULL;
2361 struct nfs_server *server = NFS_SERVER(dir);
2362 struct nfs4_opendata *opendata;
2363 struct dentry *dentry = ctx->dentry;
2364 struct rpc_cred *cred = ctx->cred;
2365 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2366 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2367 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2368 struct nfs4_label *olabel = NULL;
2369 int status;
2370
2371 /* Protect against reboot recovery conflicts */
2372 status = -ENOMEM;
2373 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2374 if (sp == NULL) {
2375 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2376 goto out_err;
2377 }
2378 status = nfs4_recover_expired_lease(server);
2379 if (status != 0)
2380 goto err_put_state_owner;
2381 if (d_really_is_positive(dentry))
2382 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2383 status = -ENOMEM;
2384 if (d_really_is_positive(dentry))
2385 claim = NFS4_OPEN_CLAIM_FH;
2386 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2387 label, claim, GFP_KERNEL);
2388 if (opendata == NULL)
2389 goto err_put_state_owner;
2390
2391 if (label) {
2392 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2393 if (IS_ERR(olabel)) {
2394 status = PTR_ERR(olabel);
2395 goto err_opendata_put;
2396 }
2397 }
2398
2399 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2400 if (!opendata->f_attr.mdsthreshold) {
2401 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2402 if (!opendata->f_attr.mdsthreshold)
2403 goto err_free_label;
2404 }
2405 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2406 }
2407 if (d_really_is_positive(dentry))
2408 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2409
2410 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2411 if (status != 0)
2412 goto err_free_label;
2413 state = ctx->state;
2414
2415 if ((opendata->o_arg.open_flags & O_EXCL) &&
2416 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2417 nfs4_exclusive_attrset(opendata, sattr);
2418
2419 nfs_fattr_init(opendata->o_res.f_attr);
2420 status = nfs4_do_setattr(state->inode, cred,
2421 opendata->o_res.f_attr, sattr,
2422 state, label, olabel);
2423 if (status == 0) {
2424 nfs_setattr_update_inode(state->inode, sattr,
2425 opendata->o_res.f_attr);
2426 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2427 }
2428 }
2429 if (opendata->file_created)
2430 *opened |= FILE_CREATED;
2431
2432 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2433 *ctx_th = opendata->f_attr.mdsthreshold;
2434 opendata->f_attr.mdsthreshold = NULL;
2435 }
2436
2437 nfs4_label_free(olabel);
2438
2439 nfs4_opendata_put(opendata);
2440 nfs4_put_state_owner(sp);
2441 return 0;
2442 err_free_label:
2443 nfs4_label_free(olabel);
2444 err_opendata_put:
2445 nfs4_opendata_put(opendata);
2446 err_put_state_owner:
2447 nfs4_put_state_owner(sp);
2448 out_err:
2449 return status;
2450 }
2451
2452
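/*
 * Wrapper around _nfs4_do_open() that retries on NFS4ERR_BAD_SEQID (with a
 * rate-limited warning), NFS4ERR_BAD_STATEID, -EAGAIN (a delegation was
 * found) and other recoverable errors, and returns the referenced
 * nfs4_state stored in the open context.
 */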
2453 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2454 struct nfs_open_context *ctx,
2455 int flags,
2456 struct iattr *sattr,
2457 struct nfs4_label *label,
2458 int *opened)
2459 {
2460 struct nfs_server *server = NFS_SERVER(dir);
2461 struct nfs4_exception exception = { };
2462 struct nfs4_state *res;
2463 int status;
2464
2465 do {
2466 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2467 res = ctx->state;
2468 trace_nfs4_open_file(ctx, flags, status);
2469 if (status == 0)
2470 break;
2471 /* NOTE: BAD_SEQID means the server and client disagree about the
2472 * book-keeping w.r.t. state-changing operations
2473 * (OPEN/CLOSE/LOCK/LOCKU...)
2474 * It is actually a sign of a bug on the client or on the server.
2475 *
2476 * If we receive a BAD_SEQID error in the particular case of
2477 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2478 * have unhashed the old state_owner for us, and that we can
2479 * therefore safely retry using a new one. We should still warn
2480 * the user though...
2481 */
2482 if (status == -NFS4ERR_BAD_SEQID) {
2483 pr_warn_ratelimited("NFS: v4 server %s "
2484 " returned a bad sequence-id error!\n",
2485 NFS_SERVER(dir)->nfs_client->cl_hostname);
2486 exception.retry = 1;
2487 continue;
2488 }
2489 /*
2490 * BAD_STATEID on OPEN means that the server cancelled our
2491 * state before it received the OPEN_CONFIRM.
2492 * Recover by retrying the request as per the discussion
2493 * on Page 181 of RFC3530.
2494 */
2495 if (status == -NFS4ERR_BAD_STATEID) {
2496 exception.retry = 1;
2497 continue;
2498 }
2499 if (status == -EAGAIN) {
2500 /* We must have found a delegation */
2501 exception.retry = 1;
2502 continue;
2503 }
2504 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2505 continue;
2506 res = ERR_PTR(nfs4_handle_exception(server,
2507 status, &exception));
2508 } while (exception.retry);
2509 return res;
2510 }
2511
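/*
 * Issue a single SETATTR call. The stateid is chosen in order of
 * preference: a delegation stateid for the inode, then (for size changes)
 * an open/lock stateid belonging to the caller, and finally the zero
 * stateid. On success the lease is renewed if we hold open state.
 */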
2512 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2513 struct nfs_fattr *fattr, struct iattr *sattr,
2514 struct nfs4_state *state, struct nfs4_label *ilabel,
2515 struct nfs4_label *olabel)
2516 {
2517 struct nfs_server *server = NFS_SERVER(inode);
2518 struct nfs_setattrargs arg = {
2519 .fh = NFS_FH(inode),
2520 .iap = sattr,
2521 .server = server,
2522 .bitmask = server->attr_bitmask,
2523 .label = ilabel,
2524 };
2525 struct nfs_setattrres res = {
2526 .fattr = fattr,
2527 .label = olabel,
2528 .server = server,
2529 };
2530 struct rpc_message msg = {
2531 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2532 .rpc_argp = &arg,
2533 .rpc_resp = &res,
2534 .rpc_cred = cred,
2535 };
2536 unsigned long timestamp = jiffies;
2537 fmode_t fmode;
2538 bool truncate;
2539 int status;
2540
2541 arg.bitmask = nfs4_bitmask(server, ilabel);
2542 if (ilabel)
2543 arg.bitmask = nfs4_bitmask(server, olabel);
2544
2545 nfs_fattr_init(fattr);
2546
2547 /* Servers should only apply open mode checks for file size changes */
2548 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2549 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2550
2551 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2552 /* Use that stateid */
2553 } else if (truncate && state != NULL) {
2554 struct nfs_lockowner lockowner = {
2555 .l_owner = current->files,
2556 .l_pid = current->tgid,
2557 };
2558 if (!nfs4_valid_open_stateid(state))
2559 return -EBADF;
2560 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2561 &lockowner) == -EIO)
2562 return -EBADF;
2563 } else
2564 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2565
2566 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2567 if (status == 0 && state != NULL)
2568 renew_lease(server, timestamp);
2569 return status;
2570 }
2571
2572 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2573 struct nfs_fattr *fattr, struct iattr *sattr,
2574 struct nfs4_state *state, struct nfs4_label *ilabel,
2575 struct nfs4_label *olabel)
2576 {
2577 struct nfs_server *server = NFS_SERVER(inode);
2578 struct nfs4_exception exception = {
2579 .state = state,
2580 .inode = inode,
2581 };
2582 int err;
2583 do {
2584 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2585 trace_nfs4_setattr(inode, err);
2586 switch (err) {
2587 case -NFS4ERR_OPENMODE:
2588 if (!(sattr->ia_valid & ATTR_SIZE)) {
2589 pr_warn_once("NFSv4: server %s is incorrectly "
2590 "applying open mode checks to "
2591 "a SETATTR that is not "
2592 "changing file size.\n",
2593 server->nfs_client->cl_hostname);
2594 }
2595 if (state && !(state->state & FMODE_WRITE)) {
2596 err = -EBADF;
2597 if (sattr->ia_valid & ATTR_OPEN)
2598 err = -EACCES;
2599 goto out;
2600 }
2601 }
2602 err = nfs4_handle_exception(server, err, &exception);
2603 } while (exception.retry);
2604 out:
2605 return err;
2606 }
2607
2608 struct nfs4_closedata {
2609 struct inode *inode;
2610 struct nfs4_state *state;
2611 struct nfs_closeargs arg;
2612 struct nfs_closeres res;
2613 struct nfs_fattr fattr;
2614 unsigned long timestamp;
2615 bool roc;
2616 u32 roc_barrier;
2617 };
2618
2619 static void nfs4_free_closedata(void *data)
2620 {
2621 struct nfs4_closedata *calldata = data;
2622 struct nfs4_state_owner *sp = calldata->state->owner;
2623 struct super_block *sb = calldata->state->inode->i_sb;
2624
2625 if (calldata->roc)
2626 pnfs_roc_release(calldata->state->inode);
2627 nfs4_put_open_state(calldata->state);
2628 nfs_free_seqid(calldata->arg.seqid);
2629 nfs4_put_state_owner(sp);
2630 nfs_sb_deactive(sb);
2631 kfree(calldata);
2632 }
2633
2634 static void nfs4_close_done(struct rpc_task *task, void *data)
2635 {
2636 struct nfs4_closedata *calldata = data;
2637 struct nfs4_state *state = calldata->state;
2638 struct nfs_server *server = NFS_SERVER(calldata->inode);
2639 nfs4_stateid *res_stateid = NULL;
2640
2641 dprintk("%s: begin!\n", __func__);
2642 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2643 return;
2644 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2645 /* hmm. we are done with the inode, and in the process of freeing
2646 * the state_owner. we keep this around to process errors
2647 */
2648 switch (task->tk_status) {
2649 case 0:
2650 res_stateid = &calldata->res.stateid;
2651 if (calldata->arg.fmode == 0 && calldata->roc)
2652 pnfs_roc_set_barrier(state->inode,
2653 calldata->roc_barrier);
2654 renew_lease(server, calldata->timestamp);
2655 break;
2656 case -NFS4ERR_ADMIN_REVOKED:
2657 case -NFS4ERR_STALE_STATEID:
2658 case -NFS4ERR_OLD_STATEID:
2659 case -NFS4ERR_BAD_STATEID:
2660 case -NFS4ERR_EXPIRED:
2661 if (!nfs4_stateid_match(&calldata->arg.stateid,
2662 &state->open_stateid)) {
2663 rpc_restart_call_prepare(task);
2664 goto out_release;
2665 }
2666 if (calldata->arg.fmode == 0)
2667 break;
2668 default:
2669 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2670 rpc_restart_call_prepare(task);
2671 goto out_release;
2672 }
2673 }
2674 nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
2675 out_release:
2676 nfs_release_seqid(calldata->arg.seqid);
2677 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2678 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2679 }
2680
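/*
 * Work out whether a CLOSE or an OPEN_DOWNGRADE needs to be sent, based on
 * which open modes are still in use for this state. If nothing needs to
 * change on the server, the RPC is skipped entirely.
 */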
2681 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2682 {
2683 struct nfs4_closedata *calldata = data;
2684 struct nfs4_state *state = calldata->state;
2685 struct inode *inode = calldata->inode;
2686 bool is_rdonly, is_wronly, is_rdwr;
2687 int call_close = 0;
2688
2689 dprintk("%s: begin!\n", __func__);
2690 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2691 goto out_wait;
2692
2693 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2694 spin_lock(&state->owner->so_lock);
2695 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2696 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2697 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2698 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2699 /* Calculate the change in open mode */
2700 calldata->arg.fmode = 0;
2701 if (state->n_rdwr == 0) {
2702 if (state->n_rdonly == 0)
2703 call_close |= is_rdonly;
2704 else if (is_rdonly)
2705 calldata->arg.fmode |= FMODE_READ;
2706 if (state->n_wronly == 0)
2707 call_close |= is_wronly;
2708 else if (is_wronly)
2709 calldata->arg.fmode |= FMODE_WRITE;
2710 } else if (is_rdwr)
2711 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2712
2713 if (calldata->arg.fmode == 0)
2714 call_close |= is_rdwr;
2715
2716 if (!nfs4_valid_open_stateid(state))
2717 call_close = 0;
2718 spin_unlock(&state->owner->so_lock);
2719
2720 if (!call_close) {
2721 /* Note: exit _without_ calling nfs4_close_done */
2722 goto out_no_action;
2723 }
2724
2725 if (calldata->arg.fmode == 0) {
2726 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2727 if (calldata->roc &&
2728 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
2729 nfs_release_seqid(calldata->arg.seqid);
2730 goto out_wait;
2731 }
2732 }
2733 calldata->arg.share_access =
2734 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2735 calldata->arg.fmode, 0);
2736
2737 nfs_fattr_init(calldata->res.fattr);
2738 calldata->timestamp = jiffies;
2739 if (nfs4_setup_sequence(NFS_SERVER(inode),
2740 &calldata->arg.seq_args,
2741 &calldata->res.seq_res,
2742 task) != 0)
2743 nfs_release_seqid(calldata->arg.seqid);
2744 dprintk("%s: done!\n", __func__);
2745 return;
2746 out_no_action:
2747 task->tk_action = NULL;
2748 out_wait:
2749 nfs4_sequence_done(task, &calldata->res.seq_res);
2750 }
2751
2752 static const struct rpc_call_ops nfs4_close_ops = {
2753 .rpc_call_prepare = nfs4_close_prepare,
2754 .rpc_call_done = nfs4_close_done,
2755 .rpc_release = nfs4_free_closedata,
2756 };
2757
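/* Return-on-close is only relevant when the inode has a pNFS layout. */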
2758 static bool nfs4_roc(struct inode *inode)
2759 {
2760 if (!nfs_have_layout(inode))
2761 return false;
2762 return pnfs_roc(inode);
2763 }
2764
2765 /*
2766 * It is possible for data to be read from or written to a memory-mapped
2767 * file after the sys_close call (which hits the VFS layer as a flush).
2768 * This means that we can't safely call NFSv4 CLOSE on a file until
2769 * the inode is cleared. This in turn means that we are not good
2770 * NFSv4 citizens - we do not tell the server to update the file's
2771 * share state even when we are done with one of the three share
2772 * stateids in the inode.
2773 *
2774 * NOTE: Caller must be holding the sp->so_owner semaphore!
2775 */
2776 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2777 {
2778 struct nfs_server *server = NFS_SERVER(state->inode);
2779 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2780 struct nfs4_closedata *calldata;
2781 struct nfs4_state_owner *sp = state->owner;
2782 struct rpc_task *task;
2783 struct rpc_message msg = {
2784 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2785 .rpc_cred = state->owner->so_cred,
2786 };
2787 struct rpc_task_setup task_setup_data = {
2788 .rpc_client = server->client,
2789 .rpc_message = &msg,
2790 .callback_ops = &nfs4_close_ops,
2791 .workqueue = nfsiod_workqueue,
2792 .flags = RPC_TASK_ASYNC,
2793 };
2794 int status = -ENOMEM;
2795
2796 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2797 &task_setup_data.rpc_client, &msg);
2798
2799 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2800 if (calldata == NULL)
2801 goto out;
2802 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2803 calldata->inode = state->inode;
2804 calldata->state = state;
2805 calldata->arg.fh = NFS_FH(state->inode);
2806 /* Serialization for the sequence id */
2807 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2808 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2809 if (IS_ERR(calldata->arg.seqid))
2810 goto out_free_calldata;
2811 calldata->arg.fmode = 0;
2812 calldata->arg.bitmask = server->cache_consistency_bitmask;
2813 calldata->res.fattr = &calldata->fattr;
2814 calldata->res.seqid = calldata->arg.seqid;
2815 calldata->res.server = server;
2816 calldata->roc = nfs4_roc(state->inode);
2817 nfs_sb_active(calldata->inode->i_sb);
2818
2819 msg.rpc_argp = &calldata->arg;
2820 msg.rpc_resp = &calldata->res;
2821 task_setup_data.callback_data = calldata;
2822 task = rpc_run_task(&task_setup_data);
2823 if (IS_ERR(task))
2824 return PTR_ERR(task);
2825 status = 0;
2826 if (wait)
2827 status = rpc_wait_for_completion_task(task);
2828 rpc_put_task(task);
2829 return status;
2830 out_free_calldata:
2831 kfree(calldata);
2832 out:
2833 nfs4_put_open_state(state);
2834 nfs4_put_state_owner(sp);
2835 return status;
2836 }
2837
2838 static struct inode *
2839 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2840 int open_flags, struct iattr *attr, int *opened)
2841 {
2842 struct nfs4_state *state;
2843 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2844
2845 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2846
2847 /* Protect against concurrent sillydeletes */
2848 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2849
2850 nfs4_label_release_security(label);
2851
2852 if (IS_ERR(state))
2853 return ERR_CAST(state);
2854 return state->inode;
2855 }
2856
2857 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2858 {
2859 if (ctx->state == NULL)
2860 return;
2861 if (is_sync)
2862 nfs4_close_sync(ctx->state, ctx->mode);
2863 else
2864 nfs4_close_state(ctx->state, ctx->mode);
2865 }
2866
2867 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
2868 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
2869 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
2870
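/*
 * Probe the server's supported attributes and capabilities, clamp the
 * returned bitmasks to what this minor version is expected to support,
 * and cache the results (attribute bitmasks, ACL support, filehandle
 * expiry type) in the nfs_server.
 */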
2871 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2872 {
2873 struct nfs4_server_caps_arg args = {
2874 .fhandle = fhandle,
2875 };
2876 struct nfs4_server_caps_res res = {};
2877 struct rpc_message msg = {
2878 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2879 .rpc_argp = &args,
2880 .rpc_resp = &res,
2881 };
2882 int status;
2883
2884 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2885 if (status == 0) {
2886 /* Sanity check the server answers */
2887 switch (server->nfs_client->cl_minorversion) {
2888 case 0:
2889 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
2890 res.attr_bitmask[2] = 0;
2891 break;
2892 case 1:
2893 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
2894 break;
2895 case 2:
2896 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
2897 }
2898 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2899 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2900 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2901 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2902 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2903 NFS_CAP_CTIME|NFS_CAP_MTIME|
2904 NFS_CAP_SECURITY_LABEL);
2905 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
2906 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
2907 server->caps |= NFS_CAP_ACLS;
2908 if (res.has_links != 0)
2909 server->caps |= NFS_CAP_HARDLINKS;
2910 if (res.has_symlinks != 0)
2911 server->caps |= NFS_CAP_SYMLINKS;
2912 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2913 server->caps |= NFS_CAP_FILEID;
2914 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2915 server->caps |= NFS_CAP_MODE;
2916 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2917 server->caps |= NFS_CAP_NLINK;
2918 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2919 server->caps |= NFS_CAP_OWNER;
2920 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2921 server->caps |= NFS_CAP_OWNER_GROUP;
2922 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2923 server->caps |= NFS_CAP_ATIME;
2924 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2925 server->caps |= NFS_CAP_CTIME;
2926 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2927 server->caps |= NFS_CAP_MTIME;
2928 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
2929 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
2930 server->caps |= NFS_CAP_SECURITY_LABEL;
2931 #endif
2932 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
2933 sizeof(server->attr_bitmask));
2934 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
2935
2936 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2937 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2938 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2939 server->cache_consistency_bitmask[2] = 0;
2940 server->acl_bitmask = res.acl_bitmask;
2941 server->fh_expire_type = res.fh_expire_type;
2942 }
2943
2944 return status;
2945 }
2946
2947 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2948 {
2949 struct nfs4_exception exception = { };
2950 int err;
2951 do {
2952 err = nfs4_handle_exception(server,
2953 _nfs4_server_capabilities(server, fhandle),
2954 &exception);
2955 } while (exception.retry);
2956 return err;
2957 }
2958
2959 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2960 struct nfs_fsinfo *info)
2961 {
2962 u32 bitmask[3];
2963 struct nfs4_lookup_root_arg args = {
2964 .bitmask = bitmask,
2965 };
2966 struct nfs4_lookup_res res = {
2967 .server = server,
2968 .fattr = info->fattr,
2969 .fh = fhandle,
2970 };
2971 struct rpc_message msg = {
2972 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2973 .rpc_argp = &args,
2974 .rpc_resp = &res,
2975 };
2976
2977 bitmask[0] = nfs4_fattr_bitmap[0];
2978 bitmask[1] = nfs4_fattr_bitmap[1];
2979 /*
2980 * Process the label in the upcoming getfattr
2981 */
2982 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
2983
2984 nfs_fattr_init(info->fattr);
2985 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2986 }
2987
2988 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2989 struct nfs_fsinfo *info)
2990 {
2991 struct nfs4_exception exception = { };
2992 int err;
2993 do {
2994 err = _nfs4_lookup_root(server, fhandle, info);
2995 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
2996 switch (err) {
2997 case 0:
2998 case -NFS4ERR_WRONGSEC:
2999 goto out;
3000 default:
3001 err = nfs4_handle_exception(server, err, &exception);
3002 }
3003 } while (exception.retry);
3004 out:
3005 return err;
3006 }
3007
3008 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3009 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3010 {
3011 struct rpc_auth_create_args auth_args = {
3012 .pseudoflavor = flavor,
3013 };
3014 struct rpc_auth *auth;
3015 int ret;
3016
3017 auth = rpcauth_create(&auth_args, server->client);
3018 if (IS_ERR(auth)) {
3019 ret = -EACCES;
3020 goto out;
3021 }
3022 ret = nfs4_lookup_root(server, fhandle, info);
3023 out:
3024 return ret;
3025 }
3026
3027 /*
3028 * Retry pseudoroot lookup with various security flavors. We do this when:
3029 *
3030 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3031 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3032 *
3033 * Returns zero on success, or a negative NFS4ERR value, or a
3034 * negative errno value.
3035 */
3036 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3037 struct nfs_fsinfo *info)
3038 {
3039 /* Per 3530bis 15.33.5 */
3040 static const rpc_authflavor_t flav_array[] = {
3041 RPC_AUTH_GSS_KRB5P,
3042 RPC_AUTH_GSS_KRB5I,
3043 RPC_AUTH_GSS_KRB5,
3044 RPC_AUTH_UNIX, /* courtesy */
3045 RPC_AUTH_NULL,
3046 };
3047 int status = -EPERM;
3048 size_t i;
3049
3050 if (server->auth_info.flavor_len > 0) {
3051 /* try each flavor specified by user */
3052 for (i = 0; i < server->auth_info.flavor_len; i++) {
3053 status = nfs4_lookup_root_sec(server, fhandle, info,
3054 server->auth_info.flavors[i]);
3055 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3056 continue;
3057 break;
3058 }
3059 } else {
3060 /* no flavors specified by user, try default list */
3061 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3062 status = nfs4_lookup_root_sec(server, fhandle, info,
3063 flav_array[i]);
3064 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3065 continue;
3066 break;
3067 }
3068 }
3069
3070 /*
3071 * -EACCES could mean that the user doesn't have correct permissions
3072 * to access the mount. It could also mean that we tried to mount
3073 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3074 * existing mount programs don't handle -EACCES very well so it should
3075 * be mapped to -EPERM instead.
3076 */
3077 if (status == -EACCES)
3078 status = -EPERM;
3079 return status;
3080 }
3081
3082 static int nfs4_do_find_root_sec(struct nfs_server *server,
3083 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3084 {
3085 int mv = server->nfs_client->cl_minorversion;
3086 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3087 }
3088
3089 /**
3090 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3091 * @server: initialized nfs_server handle
3092 * @fhandle: we fill in the pseudo-fs root file handle
3093 * @info: we fill in an FSINFO struct
3094 * @auth_probe: probe the auth flavours
3095 *
3096 * Returns zero on success, or a negative errno.
3097 */
3098 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3099 struct nfs_fsinfo *info,
3100 bool auth_probe)
3101 {
3102 int status = 0;
3103
3104 if (!auth_probe)
3105 status = nfs4_lookup_root(server, fhandle, info);
3106
3107 if (auth_probe || status == -NFS4ERR_WRONGSEC)
3108 status = nfs4_do_find_root_sec(server, fhandle, info);
3109
3110 if (status == 0)
3111 status = nfs4_server_capabilities(server, fhandle);
3112 if (status == 0)
3113 status = nfs4_do_fsinfo(server, fhandle, info);
3114
3115 return nfs4_map_errors(status);
3116 }
3117
3118 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3119 struct nfs_fsinfo *info)
3120 {
3121 int error;
3122 struct nfs_fattr *fattr = info->fattr;
3123 struct nfs4_label *label = NULL;
3124
3125 error = nfs4_server_capabilities(server, mntfh);
3126 if (error < 0) {
3127 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3128 return error;
3129 }
3130
3131 label = nfs4_label_alloc(server, GFP_KERNEL);
3132 if (IS_ERR(label))
3133 return PTR_ERR(label);
3134
3135 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3136 if (error < 0) {
3137 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3138 goto err_free_label;
3139 }
3140
3141 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3142 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3143 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3144
3145 err_free_label:
3146 nfs4_label_free(label);
3147
3148 return error;
3149 }
3150
3151 /*
3152 * Get locations and (maybe) other attributes of a referral.
3153 * Note that we'll actually follow the referral later, when
3154 * we detect an fsid mismatch during inode revalidation.
3155 */
3156 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3157 const struct qstr *name, struct nfs_fattr *fattr,
3158 struct nfs_fh *fhandle)
3159 {
3160 int status = -ENOMEM;
3161 struct page *page = NULL;
3162 struct nfs4_fs_locations *locations = NULL;
3163
3164 page = alloc_page(GFP_KERNEL);
3165 if (page == NULL)
3166 goto out;
3167 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3168 if (locations == NULL)
3169 goto out;
3170
3171 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3172 if (status != 0)
3173 goto out;
3174
3175 /*
3176 * If the fsid didn't change, this is a migration event, not a
3177 * referral. Cause us to drop into the exception handler, which
3178 * will kick off migration recovery.
3179 */
3180 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3181 dprintk("%s: server did not return a different fsid for"
3182 " a referral at %s\n", __func__, name->name);
3183 status = -NFS4ERR_MOVED;
3184 goto out;
3185 }
3186 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3187 nfs_fixup_referral_attributes(&locations->fattr);
3188
3189 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3190 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3191 memset(fhandle, 0, sizeof(struct nfs_fh));
3192 out:
3193 if (page)
3194 __free_page(page);
3195 kfree(locations);
3196 return status;
3197 }
3198
3199 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3200 struct nfs_fattr *fattr, struct nfs4_label *label)
3201 {
3202 struct nfs4_getattr_arg args = {
3203 .fh = fhandle,
3204 .bitmask = server->attr_bitmask,
3205 };
3206 struct nfs4_getattr_res res = {
3207 .fattr = fattr,
3208 .label = label,
3209 .server = server,
3210 };
3211 struct rpc_message msg = {
3212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3213 .rpc_argp = &args,
3214 .rpc_resp = &res,
3215 };
3216
3217 args.bitmask = nfs4_bitmask(server, label);
3218
3219 nfs_fattr_init(fattr);
3220 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3221 }
3222
3223 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3224 struct nfs_fattr *fattr, struct nfs4_label *label)
3225 {
3226 struct nfs4_exception exception = { };
3227 int err;
3228 do {
3229 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3230 trace_nfs4_getattr(server, fhandle, fattr, err);
3231 err = nfs4_handle_exception(server, err,
3232 &exception);
3233 } while (exception.retry);
3234 return err;
3235 }
3236
3237 /*
3238 * The file is not closed if it is opened due to a request to change
3239 * the size of the file. The open call will not be needed once the
3240 * VFS layer lookup-intents are implemented.
3241 *
3242 * Close is called when the inode is destroyed.
3243 * If we haven't opened the file for O_WRONLY, we
3244 * need to do so in the size-change case in order to obtain a stateid.
3245 *
3246 * Got race?
3247 * Because OPEN is always done by name in NFSv4, it is
3248 * possible that we opened a different file by the same
3249 * name. We can recognize this race condition, but we
3250 * can't do anything about it besides returning an error.
3251 *
3252 * This will be fixed with VFS changes (lookup-intent).
3253 */
3254 static int
3255 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3256 struct iattr *sattr)
3257 {
3258 struct inode *inode = d_inode(dentry);
3259 struct rpc_cred *cred = NULL;
3260 struct nfs4_state *state = NULL;
3261 struct nfs4_label *label = NULL;
3262 int status;
3263
3264 if (pnfs_ld_layoutret_on_setattr(inode) &&
3265 sattr->ia_valid & ATTR_SIZE &&
3266 sattr->ia_size < i_size_read(inode))
3267 pnfs_commit_and_return_layout(inode);
3268
3269 nfs_fattr_init(fattr);
3270
3271 /* Deal with open(O_TRUNC) */
3272 if (sattr->ia_valid & ATTR_OPEN)
3273 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3274
3275 /* Optimization: if the end result is no change, don't RPC */
3276 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3277 return 0;
3278
3279 /* Search for an existing open(O_WRITE) file */
3280 if (sattr->ia_valid & ATTR_FILE) {
3281 struct nfs_open_context *ctx;
3282
3283 ctx = nfs_file_open_context(sattr->ia_file);
3284 if (ctx) {
3285 cred = ctx->cred;
3286 state = ctx->state;
3287 }
3288 }
3289
3290 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3291 if (IS_ERR(label))
3292 return PTR_ERR(label);
3293
3294 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3295 if (status == 0) {
3296 nfs_setattr_update_inode(inode, sattr, fattr);
3297 nfs_setsecurity(inode, fattr, label);
3298 }
3299 nfs4_label_free(label);
3300 return status;
3301 }
3302
3303 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3304 const struct qstr *name, struct nfs_fh *fhandle,
3305 struct nfs_fattr *fattr, struct nfs4_label *label)
3306 {
3307 struct nfs_server *server = NFS_SERVER(dir);
3308 int status;
3309 struct nfs4_lookup_arg args = {
3310 .bitmask = server->attr_bitmask,
3311 .dir_fh = NFS_FH(dir),
3312 .name = name,
3313 };
3314 struct nfs4_lookup_res res = {
3315 .server = server,
3316 .fattr = fattr,
3317 .label = label,
3318 .fh = fhandle,
3319 };
3320 struct rpc_message msg = {
3321 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3322 .rpc_argp = &args,
3323 .rpc_resp = &res,
3324 };
3325
3326 args.bitmask = nfs4_bitmask(server, label);
3327
3328 nfs_fattr_init(fattr);
3329
3330 dprintk("NFS call lookup %s\n", name->name);
3331 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3332 dprintk("NFS reply lookup: %d\n", status);
3333 return status;
3334 }
3335
3336 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3337 {
3338 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3339 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3340 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3341 fattr->nlink = 2;
3342 }
3343
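/*
 * Common LOOKUP handling: follow referrals on NFS4ERR_MOVED, renegotiate
 * the security flavor on NFS4ERR_WRONGSEC, and map NFS4ERR_BADNAME to
 * -ENOENT. On success *clnt may be replaced by a newly negotiated client,
 * which the caller is then responsible for shutting down.
 */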
3344 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3345 struct qstr *name, struct nfs_fh *fhandle,
3346 struct nfs_fattr *fattr, struct nfs4_label *label)
3347 {
3348 struct nfs4_exception exception = { };
3349 struct rpc_clnt *client = *clnt;
3350 int err;
3351 do {
3352 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3353 trace_nfs4_lookup(dir, name, err);
3354 switch (err) {
3355 case -NFS4ERR_BADNAME:
3356 err = -ENOENT;
3357 goto out;
3358 case -NFS4ERR_MOVED:
3359 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3360 if (err == -NFS4ERR_MOVED)
3361 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3362 goto out;
3363 case -NFS4ERR_WRONGSEC:
3364 err = -EPERM;
3365 if (client != *clnt)
3366 goto out;
3367 client = nfs4_negotiate_security(client, dir, name);
3368 if (IS_ERR(client))
3369 return PTR_ERR(client);
3370
3371 exception.retry = 1;
3372 break;
3373 default:
3374 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3375 }
3376 } while (exception.retry);
3377
3378 out:
3379 if (err == 0)
3380 *clnt = client;
3381 else if (client != *clnt)
3382 rpc_shutdown_client(client);
3383
3384 return err;
3385 }
3386
3387 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3388 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3389 struct nfs4_label *label)
3390 {
3391 int status;
3392 struct rpc_clnt *client = NFS_CLIENT(dir);
3393
3394 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3395 if (client != NFS_CLIENT(dir)) {
3396 rpc_shutdown_client(client);
3397 nfs_fixup_secinfo_attributes(fattr);
3398 }
3399 return status;
3400 }
3401
3402 struct rpc_clnt *
3403 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3404 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3405 {
3406 struct rpc_clnt *client = NFS_CLIENT(dir);
3407 int status;
3408
3409 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3410 if (status < 0)
3411 return ERR_PTR(status);
3412 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3413 }
3414
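/*
 * Issue an ACCESS call, translating the generic MAY_* mask into NFSv4
 * access bits (directories and regular files use different sets), and
 * refresh the inode with the post-op attributes returned by the server.
 */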
3415 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3416 {
3417 struct nfs_server *server = NFS_SERVER(inode);
3418 struct nfs4_accessargs args = {
3419 .fh = NFS_FH(inode),
3420 .bitmask = server->cache_consistency_bitmask,
3421 };
3422 struct nfs4_accessres res = {
3423 .server = server,
3424 };
3425 struct rpc_message msg = {
3426 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3427 .rpc_argp = &args,
3428 .rpc_resp = &res,
3429 .rpc_cred = entry->cred,
3430 };
3431 int mode = entry->mask;
3432 int status = 0;
3433
3434 /*
3435 * Determine which access bits we want to ask for...
3436 */
3437 if (mode & MAY_READ)
3438 args.access |= NFS4_ACCESS_READ;
3439 if (S_ISDIR(inode->i_mode)) {
3440 if (mode & MAY_WRITE)
3441 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3442 if (mode & MAY_EXEC)
3443 args.access |= NFS4_ACCESS_LOOKUP;
3444 } else {
3445 if (mode & MAY_WRITE)
3446 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3447 if (mode & MAY_EXEC)
3448 args.access |= NFS4_ACCESS_EXECUTE;
3449 }
3450
3451 res.fattr = nfs_alloc_fattr();
3452 if (res.fattr == NULL)
3453 return -ENOMEM;
3454
3455 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3456 if (!status) {
3457 nfs_access_set_mask(entry, res.access);
3458 nfs_refresh_inode(inode, res.fattr);
3459 }
3460 nfs_free_fattr(res.fattr);
3461 return status;
3462 }
3463
3464 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3465 {
3466 struct nfs4_exception exception = { };
3467 int err;
3468 do {
3469 err = _nfs4_proc_access(inode, entry);
3470 trace_nfs4_access(inode, err);
3471 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3472 &exception);
3473 } while (exception.retry);
3474 return err;
3475 }
3476
3477 /*
3478 * TODO: For the time being, we don't try to get any attributes
3479 * along with any of the zero-copy operations READ, READDIR,
3480 * READLINK, WRITE.
3481 *
3482 * In the case of the first three, we want to put the GETATTR
3483 * after the read-type operation -- this is because it is hard
3484 * to predict the length of a GETATTR response in v4, and thus
3485 * hard to align the READ data correctly. This means that the GETATTR
3486 * may end up partially falling into the page cache, and we should
3487 * shift it into the 'tail' of the xdr_buf before processing.
3488 * To do this efficiently, we need to know the total length
3489 * of data received, which doesn't seem to be available outside
3490 * of the RPC layer.
3491 *
3492 * In the case of WRITE, we also want to put the GETATTR after
3493 * the operation -- in this case because we want to make sure
3494 * we get the post-operation mtime and size.
3495 *
3496 * Both of these changes to the XDR layer would in fact be quite
3497 * minor, but I decided to leave them for a subsequent patch.
3498 */
3499 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3500 unsigned int pgbase, unsigned int pglen)
3501 {
3502 struct nfs4_readlink args = {
3503 .fh = NFS_FH(inode),
3504 .pgbase = pgbase,
3505 .pglen = pglen,
3506 .pages = &page,
3507 };
3508 struct nfs4_readlink_res res;
3509 struct rpc_message msg = {
3510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3511 .rpc_argp = &args,
3512 .rpc_resp = &res,
3513 };
3514
3515 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3516 }
3517
3518 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3519 unsigned int pgbase, unsigned int pglen)
3520 {
3521 struct nfs4_exception exception = { };
3522 int err;
3523 do {
3524 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3525 trace_nfs4_readlink(inode, err);
3526 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3527 &exception);
3528 } while (exception.retry);
3529 return err;
3530 }
3531
3532 /*
3533 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3534 */
3535 static int
3536 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3537 int flags)
3538 {
3539 struct nfs4_label l, *ilabel = NULL;
3540 struct nfs_open_context *ctx;
3541 struct nfs4_state *state;
3542 int opened = 0;
3543 int status = 0;
3544
3545 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3546 if (IS_ERR(ctx))
3547 return PTR_ERR(ctx);
3548
3549 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3550
3551 sattr->ia_mode &= ~current_umask();
3552 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3553 if (IS_ERR(state)) {
3554 status = PTR_ERR(state);
3555 goto out;
3556 }
3557 out:
3558 nfs4_label_release_security(ilabel);
3559 put_nfs_open_context(ctx);
3560 return status;
3561 }
3562
3563 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3564 {
3565 struct nfs_server *server = NFS_SERVER(dir);
3566 struct nfs_removeargs args = {
3567 .fh = NFS_FH(dir),
3568 .name = *name,
3569 };
3570 struct nfs_removeres res = {
3571 .server = server,
3572 };
3573 struct rpc_message msg = {
3574 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3575 .rpc_argp = &args,
3576 .rpc_resp = &res,
3577 };
3578 int status;
3579
3580 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3581 if (status == 0)
3582 update_changeattr(dir, &res.cinfo);
3583 return status;
3584 }
3585
3586 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3587 {
3588 struct nfs4_exception exception = { };
3589 int err;
3590 do {
3591 err = _nfs4_proc_remove(dir, name);
3592 trace_nfs4_remove(dir, name, err);
3593 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3594 &exception);
3595 } while (exception.retry);
3596 return err;
3597 }
3598
3599 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3600 {
3601 struct nfs_server *server = NFS_SERVER(dir);
3602 struct nfs_removeargs *args = msg->rpc_argp;
3603 struct nfs_removeres *res = msg->rpc_resp;
3604
3605 res->server = server;
3606 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3607 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3608
3609 nfs_fattr_init(res->dir_attr);
3610 }
3611
3612 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3613 {
3614 nfs4_setup_sequence(NFS_SERVER(data->dir),
3615 &data->args.seq_args,
3616 &data->res.seq_res,
3617 task);
3618 }
3619
3620 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3621 {
3622 struct nfs_unlinkdata *data = task->tk_calldata;
3623 struct nfs_removeres *res = &data->res;
3624
3625 if (!nfs4_sequence_done(task, &res->seq_res))
3626 return 0;
3627 if (nfs4_async_handle_error(task, res->server, NULL,
3628 &data->timeout) == -EAGAIN)
3629 return 0;
3630 update_changeattr(dir, &res->cinfo);
3631 return 1;
3632 }
3633
3634 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3635 {
3636 struct nfs_server *server = NFS_SERVER(dir);
3637 struct nfs_renameargs *arg = msg->rpc_argp;
3638 struct nfs_renameres *res = msg->rpc_resp;
3639
3640 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3641 res->server = server;
3642 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3643 }
3644
3645 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3646 {
3647 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3648 &data->args.seq_args,
3649 &data->res.seq_res,
3650 task);
3651 }
3652
3653 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3654 struct inode *new_dir)
3655 {
3656 struct nfs_renamedata *data = task->tk_calldata;
3657 struct nfs_renameres *res = &data->res;
3658
3659 if (!nfs4_sequence_done(task, &res->seq_res))
3660 return 0;
3661 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3662 return 0;
3663
3664 update_changeattr(old_dir, &res->old_cinfo);
3665 update_changeattr(new_dir, &res->new_cinfo);
3666 return 1;
3667 }
3668
3669 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3670 {
3671 struct nfs_server *server = NFS_SERVER(inode);
3672 struct nfs4_link_arg arg = {
3673 .fh = NFS_FH(inode),
3674 .dir_fh = NFS_FH(dir),
3675 .name = name,
3676 .bitmask = server->attr_bitmask,
3677 };
3678 struct nfs4_link_res res = {
3679 .server = server,
3680 .label = NULL,
3681 };
3682 struct rpc_message msg = {
3683 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3684 .rpc_argp = &arg,
3685 .rpc_resp = &res,
3686 };
3687 int status = -ENOMEM;
3688
3689 res.fattr = nfs_alloc_fattr();
3690 if (res.fattr == NULL)
3691 goto out;
3692
3693 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3694 if (IS_ERR(res.label)) {
3695 status = PTR_ERR(res.label);
3696 goto out;
3697 }
3698 arg.bitmask = nfs4_bitmask(server, res.label);
3699
3700 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3701 if (!status) {
3702 update_changeattr(dir, &res.cinfo);
3703 status = nfs_post_op_update_inode(inode, res.fattr);
3704 if (!status)
3705 nfs_setsecurity(inode, res.fattr, res.label);
3706 }
3707
3709 nfs4_label_free(res.label);
3710
3711 out:
3712 nfs_free_fattr(res.fattr);
3713 return status;
3714 }
3715
3716 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3717 {
3718 struct nfs4_exception exception = { };
3719 int err;
3720 do {
3721 err = nfs4_handle_exception(NFS_SERVER(inode),
3722 _nfs4_proc_link(inode, dir, name),
3723 &exception);
3724 } while (exception.retry);
3725 return err;
3726 }
3727
3728 struct nfs4_createdata {
3729 struct rpc_message msg;
3730 struct nfs4_create_arg arg;
3731 struct nfs4_create_res res;
3732 struct nfs_fh fh;
3733 struct nfs_fattr fattr;
3734 struct nfs4_label *label;
3735 };
3736
3737 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3738 struct qstr *name, struct iattr *sattr, u32 ftype)
3739 {
3740 struct nfs4_createdata *data;
3741
3742 data = kzalloc(sizeof(*data), GFP_KERNEL);
3743 if (data != NULL) {
3744 struct nfs_server *server = NFS_SERVER(dir);
3745
3746 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3747 if (IS_ERR(data->label))
3748 goto out_free;
3749
3750 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3751 data->msg.rpc_argp = &data->arg;
3752 data->msg.rpc_resp = &data->res;
3753 data->arg.dir_fh = NFS_FH(dir);
3754 data->arg.server = server;
3755 data->arg.name = name;
3756 data->arg.attrs = sattr;
3757 data->arg.ftype = ftype;
3758 data->arg.bitmask = nfs4_bitmask(server, data->label);
3759 data->res.server = server;
3760 data->res.fh = &data->fh;
3761 data->res.fattr = &data->fattr;
3762 data->res.label = data->label;
3763 nfs_fattr_init(data->res.fattr);
3764 }
3765 return data;
3766 out_free:
3767 kfree(data);
3768 return NULL;
3769 }
3770
3771 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3772 {
3773 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3774 &data->arg.seq_args, &data->res.seq_res, 1);
3775 if (status == 0) {
3776 update_changeattr(dir, &data->res.dir_cinfo);
3777 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3778 }
3779 return status;
3780 }
3781
3782 static void nfs4_free_createdata(struct nfs4_createdata *data)
3783 {
3784 nfs4_label_free(data->label);
3785 kfree(data);
3786 }
3787
3788 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3789 struct page *page, unsigned int len, struct iattr *sattr,
3790 struct nfs4_label *label)
3791 {
3792 struct nfs4_createdata *data;
3793 int status = -ENAMETOOLONG;
3794
3795 if (len > NFS4_MAXPATHLEN)
3796 goto out;
3797
3798 status = -ENOMEM;
3799 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3800 if (data == NULL)
3801 goto out;
3802
3803 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3804 data->arg.u.symlink.pages = &page;
3805 data->arg.u.symlink.len = len;
3806 data->arg.label = label;
3807
3808 status = nfs4_do_create(dir, dentry, data);
3809
3810 nfs4_free_createdata(data);
3811 out:
3812 return status;
3813 }
3814
3815 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3816 struct page *page, unsigned int len, struct iattr *sattr)
3817 {
3818 struct nfs4_exception exception = { };
3819 struct nfs4_label l, *label = NULL;
3820 int err;
3821
3822 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3823
3824 do {
3825 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3826 trace_nfs4_symlink(dir, &dentry->d_name, err);
3827 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3828 &exception);
3829 } while (exception.retry);
3830
3831 nfs4_label_release_security(label);
3832 return err;
3833 }
3834
3835 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3836 struct iattr *sattr, struct nfs4_label *label)
3837 {
3838 struct nfs4_createdata *data;
3839 int status = -ENOMEM;
3840
3841 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3842 if (data == NULL)
3843 goto out;
3844
3845 data->arg.label = label;
3846 status = nfs4_do_create(dir, dentry, data);
3847
3848 nfs4_free_createdata(data);
3849 out:
3850 return status;
3851 }
3852
3853 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3854 struct iattr *sattr)
3855 {
3856 struct nfs4_exception exception = { };
3857 struct nfs4_label l, *label = NULL;
3858 int err;
3859
3860 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3861
3862 sattr->ia_mode &= ~current_umask();
3863 do {
3864 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
3865 trace_nfs4_mkdir(dir, &dentry->d_name, err);
3866 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3867 &exception);
3868 } while (exception.retry);
3869 nfs4_label_release_security(label);
3870
3871 return err;
3872 }
3873
3874 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3875 u64 cookie, struct page **pages, unsigned int count, int plus)
3876 {
3877 struct inode *dir = d_inode(dentry);
3878 struct nfs4_readdir_arg args = {
3879 .fh = NFS_FH(dir),
3880 .pages = pages,
3881 .pgbase = 0,
3882 .count = count,
3883 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
3884 .plus = plus,
3885 };
3886 struct nfs4_readdir_res res;
3887 struct rpc_message msg = {
3888 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3889 .rpc_argp = &args,
3890 .rpc_resp = &res,
3891 .rpc_cred = cred,
3892 };
3893 int status;
3894
3895 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
3896 dentry,
3897 (unsigned long long)cookie);
3898 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3899 res.pgbase = args.pgbase;
3900 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3901 if (status >= 0) {
3902 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3903 status += args.pgbase;
3904 }
3905
3906 nfs_invalidate_atime(dir);
3907
3908 dprintk("%s: returns %d\n", __func__, status);
3909 return status;
3910 }
3911
3912 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3913 u64 cookie, struct page **pages, unsigned int count, int plus)
3914 {
3915 struct nfs4_exception exception = { };
3916 int err;
3917 do {
3918 err = _nfs4_proc_readdir(dentry, cred, cookie,
3919 pages, count, plus);
3920 trace_nfs4_readdir(d_inode(dentry), err);
3921 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
3922 &exception);
3923 } while (exception.retry);
3924 return err;
3925 }
3926
3927 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3928 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
3929 {
3930 struct nfs4_createdata *data;
3931 int mode = sattr->ia_mode;
3932 int status = -ENOMEM;
3933
3934 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3935 if (data == NULL)
3936 goto out;
3937
3938 if (S_ISFIFO(mode))
3939 data->arg.ftype = NF4FIFO;
3940 else if (S_ISBLK(mode)) {
3941 data->arg.ftype = NF4BLK;
3942 data->arg.u.device.specdata1 = MAJOR(rdev);
3943 data->arg.u.device.specdata2 = MINOR(rdev);
3944 } else if (S_ISCHR(mode)) {
3946 data->arg.ftype = NF4CHR;
3947 data->arg.u.device.specdata1 = MAJOR(rdev);
3948 data->arg.u.device.specdata2 = MINOR(rdev);
3949 } else if (!S_ISSOCK(mode)) {
3950 status = -EINVAL;
3951 goto out_free;
3952 }
3953
3954 data->arg.label = label;
3955 status = nfs4_do_create(dir, dentry, data);
3956 out_free:
3957 nfs4_free_createdata(data);
3958 out:
3959 return status;
3960 }
3961
3962 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3963 struct iattr *sattr, dev_t rdev)
3964 {
3965 struct nfs4_exception exception = { };
3966 struct nfs4_label l, *label = NULL;
3967 int err;
3968
3969 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3970
3971 sattr->ia_mode &= ~current_umask();
3972 do {
3973 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
3974 trace_nfs4_mknod(dir, &dentry->d_name, err);
3975 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3976 &exception);
3977 } while (exception.retry);
3978
3979 nfs4_label_release_security(label);
3980
3981 return err;
3982 }
3983
3984 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3985 struct nfs_fsstat *fsstat)
3986 {
3987 struct nfs4_statfs_arg args = {
3988 .fh = fhandle,
3989 .bitmask = server->attr_bitmask,
3990 };
3991 struct nfs4_statfs_res res = {
3992 .fsstat = fsstat,
3993 };
3994 struct rpc_message msg = {
3995 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3996 .rpc_argp = &args,
3997 .rpc_resp = &res,
3998 };
3999
4000 nfs_fattr_init(fsstat->fattr);
4001 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4002 }
4003
4004 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4005 {
4006 struct nfs4_exception exception = { };
4007 int err;
4008 do {
4009 err = nfs4_handle_exception(server,
4010 _nfs4_proc_statfs(server, fhandle, fsstat),
4011 &exception);
4012 } while (exception.retry);
4013 return err;
4014 }
4015
4016 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4017 struct nfs_fsinfo *fsinfo)
4018 {
4019 struct nfs4_fsinfo_arg args = {
4020 .fh = fhandle,
4021 .bitmask = server->attr_bitmask,
4022 };
4023 struct nfs4_fsinfo_res res = {
4024 .fsinfo = fsinfo,
4025 };
4026 struct rpc_message msg = {
4027 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4028 .rpc_argp = &args,
4029 .rpc_resp = &res,
4030 };
4031
4032 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4033 }
4034
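/*
 * Besides the usual exception handling, a successful FSINFO also
 * (re)arms the client lease: the server's lease_time and the time the
 * call was started are recorded under cl_lock before breaking out of
 * the retry loop.
 */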
4035 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4036 {
4037 struct nfs4_exception exception = { };
4038 unsigned long now = jiffies;
4039 int err;
4040
4041 do {
4042 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4043 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4044 if (err == 0) {
4045 struct nfs_client *clp = server->nfs_client;
4046
4047 spin_lock(&clp->cl_lock);
4048 clp->cl_lease_time = fsinfo->lease_time * HZ;
4049 clp->cl_last_renewal = now;
4050 spin_unlock(&clp->cl_lock);
4051 break;
4052 }
4053 err = nfs4_handle_exception(server, err, &exception);
4054 } while (exception.retry);
4055 return err;
4056 }
4057
4058 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4059 {
4060 int error;
4061
4062 nfs_fattr_init(fsinfo->fattr);
4063 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4064 if (error == 0) {
4065 /* block layout checks this! */
4066 server->pnfs_blksize = fsinfo->blksize;
4067 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4068 }
4069
4070 return error;
4071 }
4072
4073 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4074 struct nfs_pathconf *pathconf)
4075 {
4076 struct nfs4_pathconf_arg args = {
4077 .fh = fhandle,
4078 .bitmask = server->attr_bitmask,
4079 };
4080 struct nfs4_pathconf_res res = {
4081 .pathconf = pathconf,
4082 };
4083 struct rpc_message msg = {
4084 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4085 .rpc_argp = &args,
4086 .rpc_resp = &res,
4087 };
4088
4089 /* None of the pathconf attributes are mandatory to implement */
4090 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4091 memset(pathconf, 0, sizeof(*pathconf));
4092 return 0;
4093 }
4094
4095 nfs_fattr_init(pathconf->fattr);
4096 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4097 }
4098
4099 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4100 struct nfs_pathconf *pathconf)
4101 {
4102 struct nfs4_exception exception = { };
4103 int err;
4104
4105 do {
4106 err = nfs4_handle_exception(server,
4107 _nfs4_proc_pathconf(server, fhandle, pathconf),
4108 &exception);
4109 } while (exception.retry);
4110 return err;
4111 }
4112
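/*
 * nfs4_set_rw_stateid - pick the stateid to use for a READ or WRITE
 * @stateid: where to store the selected stateid
 * @ctx: open context for the I/O
 * @l_ctx: lock context, if the I/O is performed under a lock
 * @fmode: FMODE_READ or FMODE_WRITE
 *
 * Delegates to nfs4_select_rw_stateid(); the lock owner is passed along
 * only when a lock context is supplied. Callers treat a -EIO return as
 * "this stateid covers a lost lock" (see nfs4_stateid_is_current()).
 */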
4113 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4114 const struct nfs_open_context *ctx,
4115 const struct nfs_lock_context *l_ctx,
4116 fmode_t fmode)
4117 {
4118 const struct nfs_lockowner *lockowner = NULL;
4119
4120 if (l_ctx != NULL)
4121 lockowner = &l_ctx->lockowner;
4122 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4123 }
4124 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4125
4126 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4127 const struct nfs_open_context *ctx,
4128 const struct nfs_lock_context *l_ctx,
4129 fmode_t fmode)
4130 {
4131 nfs4_stateid current_stateid;
4132
4133 /* If the current stateid represents a lost lock, then exit */
4134 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4135 return true;
4136 return nfs4_stateid_match(stateid, &current_stateid);
4137 }
4138
4139 static bool nfs4_error_stateid_expired(int err)
4140 {
4141 switch (err) {
4142 case -NFS4ERR_DELEG_REVOKED:
4143 case -NFS4ERR_ADMIN_REVOKED:
4144 case -NFS4ERR_BAD_STATEID:
4145 case -NFS4ERR_STALE_STATEID:
4146 case -NFS4ERR_OLD_STATEID:
4147 case -NFS4ERR_OPENMODE:
4148 case -NFS4ERR_EXPIRED:
4149 return true;
4150 }
4151 return false;
4152 }
4153
4154 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4155 {
4156 nfs_invalidate_atime(hdr->inode);
4157 }
4158
4159 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4160 {
4161 struct nfs_server *server = NFS_SERVER(hdr->inode);
4162
4163 trace_nfs4_read(hdr, task->tk_status);
4164 if (nfs4_async_handle_error(task, server,
4165 hdr->args.context->state,
4166 NULL) == -EAGAIN) {
4167 rpc_restart_call_prepare(task);
4168 return -EAGAIN;
4169 }
4170
4171 __nfs4_read_done_cb(hdr);
4172 if (task->tk_status > 0)
4173 renew_lease(server, hdr->timestamp);
4174 return 0;
4175 }
4176
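/*
 * If the READ failed with one of the stateid-expiry errors listed in
 * nfs4_error_stateid_expired() and the stateid it carried is no longer
 * the one currently selected for this context, the state has already
 * been re-established; restart the RPC so it picks up the new stateid
 * instead of reporting the error.
 */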
4177 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4178 struct nfs_pgio_args *args)
4179 {
4180
4181 if (!nfs4_error_stateid_expired(task->tk_status) ||
4182 nfs4_stateid_is_current(&args->stateid,
4183 args->context,
4184 args->lock_context,
4185 FMODE_READ))
4186 return false;
4187 rpc_restart_call_prepare(task);
4188 return true;
4189 }
4190
4191 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4192 {
4193
4194 dprintk("--> %s\n", __func__);
4195
4196 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4197 return -EAGAIN;
4198 if (nfs4_read_stateid_changed(task, &hdr->args))
4199 return -EAGAIN;
4200 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4201 nfs4_read_done_cb(task, hdr);
4202 }
4203
4204 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4205 struct rpc_message *msg)
4206 {
4207 hdr->timestamp = jiffies;
4208 hdr->pgio_done_cb = nfs4_read_done_cb;
4209 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4210 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4211 }
4212
4213 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4214 struct nfs_pgio_header *hdr)
4215 {
4216 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4217 &hdr->args.seq_args,
4218 &hdr->res.seq_res,
4219 task))
4220 return 0;
4221 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4222 hdr->args.lock_context,
4223 hdr->rw_ops->rw_mode) == -EIO)
4224 return -EIO;
4225 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4226 return -EIO;
4227 return 0;
4228 }
4229
4230 static int nfs4_write_done_cb(struct rpc_task *task,
4231 struct nfs_pgio_header *hdr)
4232 {
4233 struct inode *inode = hdr->inode;
4234
4235 trace_nfs4_write(hdr, task->tk_status);
4236 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4237 hdr->args.context->state,
4238 NULL) == -EAGAIN) {
4239 rpc_restart_call_prepare(task);
4240 return -EAGAIN;
4241 }
4242 if (task->tk_status >= 0) {
4243 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4244 nfs_writeback_update_inode(hdr);
4245 }
4246 return 0;
4247 }
4248
4249 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4250 struct nfs_pgio_args *args)
4251 {
4252
4253 if (!nfs4_error_stateid_expired(task->tk_status) ||
4254 nfs4_stateid_is_current(&args->stateid,
4255 args->context,
4256 args->lock_context,
4257 FMODE_WRITE))
4258 return false;
4259 rpc_restart_call_prepare(task);
4260 return true;
4261 }
4262
4263 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4264 {
4265 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4266 return -EAGAIN;
4267 if (nfs4_write_stateid_changed(task, &hdr->args))
4268 return -EAGAIN;
4269 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4270 nfs4_write_done_cb(task, hdr);
4271 }
4272
4273 static
4274 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4275 {
4276 /* Don't request attributes for pNFS or O_DIRECT writes */
4277 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4278 return false;
4279 /* Otherwise, request attributes if and only if we don't hold
4280 * a delegation
4281 */
4282 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4283 }
4284
4285 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4286 struct rpc_message *msg)
4287 {
4288 struct nfs_server *server = NFS_SERVER(hdr->inode);
4289
4290 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4291 hdr->args.bitmask = NULL;
4292 hdr->res.fattr = NULL;
4293 } else
4294 hdr->args.bitmask = server->cache_consistency_bitmask;
4295
4296 if (!hdr->pgio_done_cb)
4297 hdr->pgio_done_cb = nfs4_write_done_cb;
4298 hdr->res.server = server;
4299 hdr->timestamp = jiffies;
4300
4301 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4302 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4303 }
4304
4305 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4306 {
4307 nfs4_setup_sequence(NFS_SERVER(data->inode),
4308 &data->args.seq_args,
4309 &data->res.seq_res,
4310 task);
4311 }
4312
4313 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4314 {
4315 struct inode *inode = data->inode;
4316
4317 trace_nfs4_commit(data, task->tk_status);
4318 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4319 NULL, NULL) == -EAGAIN) {
4320 rpc_restart_call_prepare(task);
4321 return -EAGAIN;
4322 }
4323 return 0;
4324 }
4325
4326 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4327 {
4328 if (!nfs4_sequence_done(task, &data->res.seq_res))
4329 return -EAGAIN;
4330 return data->commit_done_cb(task, data);
4331 }
4332
4333 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4334 {
4335 struct nfs_server *server = NFS_SERVER(data->inode);
4336
4337 if (data->commit_done_cb == NULL)
4338 data->commit_done_cb = nfs4_commit_done_cb;
4339 data->res.server = server;
4340 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4341 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4342 }
4343
4344 struct nfs4_renewdata {
4345 struct nfs_client *client;
4346 unsigned long timestamp;
4347 };
4348
4349 /*
4350 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4351 * standalone procedure for queueing an asynchronous RENEW.
4352 */
4353 static void nfs4_renew_release(void *calldata)
4354 {
4355 struct nfs4_renewdata *data = calldata;
4356 struct nfs_client *clp = data->client;
4357
4358 if (atomic_read(&clp->cl_count) > 1)
4359 nfs4_schedule_state_renewal(clp);
4360 nfs_put_client(clp);
4361 kfree(data);
4362 }
4363
4364 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4365 {
4366 struct nfs4_renewdata *data = calldata;
4367 struct nfs_client *clp = data->client;
4368 unsigned long timestamp = data->timestamp;
4369
4370 trace_nfs4_renew_async(clp, task->tk_status);
4371 switch (task->tk_status) {
4372 case 0:
4373 break;
4374 case -NFS4ERR_LEASE_MOVED:
4375 nfs4_schedule_lease_moved_recovery(clp);
4376 break;
4377 default:
4378 /* Unless we're shutting down, schedule state recovery! */
4379 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4380 return;
4381 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4382 nfs4_schedule_lease_recovery(clp);
4383 return;
4384 }
4385 nfs4_schedule_path_down_recovery(clp);
4386 }
4387 do_renew_lease(clp, timestamp);
4388 }
4389
4390 static const struct rpc_call_ops nfs4_renew_ops = {
4391 .rpc_call_done = nfs4_renew_done,
4392 .rpc_release = nfs4_renew_release,
4393 };
4394
4395 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4396 {
4397 struct rpc_message msg = {
4398 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4399 .rpc_argp = clp,
4400 .rpc_cred = cred,
4401 };
4402 struct nfs4_renewdata *data;
4403
4404 if (renew_flags == 0)
4405 return 0;
4406 if (!atomic_inc_not_zero(&clp->cl_count))
4407 return -EIO;
4408 data = kmalloc(sizeof(*data), GFP_NOFS);
4409 if (data == NULL)
4410 return -ENOMEM;
4411 data->client = clp;
4412 data->timestamp = jiffies;
4413 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4414 &nfs4_renew_ops, data);
4415 }
4416
4417 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4418 {
4419 struct rpc_message msg = {
4420 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4421 .rpc_argp = clp,
4422 .rpc_cred = cred,
4423 };
4424 unsigned long now = jiffies;
4425 int status;
4426
4427 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4428 if (status < 0)
4429 return status;
4430 do_renew_lease(clp, now);
4431 return 0;
4432 }
4433
4434 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4435 {
4436 return server->caps & NFS_CAP_ACLS;
4437 }
4438
4439 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4440 * it's OK to put sizeof(struct page *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4441 * the stack.
4442 */
4443 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
4444
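/*
 * Copy a caller-supplied buffer into freshly allocated pages. Returns
 * the number of pages filled, or -ENOMEM after freeing any pages that
 * had already been allocated.
 */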
4445 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4446 struct page **pages, unsigned int *pgbase)
4447 {
4448 struct page *newpage, **spages;
4449 int rc = 0;
4450 size_t len;
4451 spages = pages;
4452
4453 do {
4454 len = min_t(size_t, PAGE_SIZE, buflen);
4455 newpage = alloc_page(GFP_KERNEL);
4456
4457 if (newpage == NULL)
4458 goto unwind;
4459 memcpy(page_address(newpage), buf, len);
4460 buf += len;
4461 buflen -= len;
4462 *pages++ = newpage;
4463 rc++;
4464 } while (buflen != 0);
4465
4466 return rc;
4467
4468 unwind:
4469 for(; rc > 0; rc--)
4470 __free_page(spages[rc-1]);
4471 return -ENOMEM;
4472 }
4473
4474 struct nfs4_cached_acl {
4475 int cached;
4476 size_t len;
4477 char data[0];
4478 };
4479
4480 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4481 {
4482 struct nfs_inode *nfsi = NFS_I(inode);
4483
4484 spin_lock(&inode->i_lock);
4485 kfree(nfsi->nfs4_acl);
4486 nfsi->nfs4_acl = acl;
4487 spin_unlock(&inode->i_lock);
4488 }
4489
4490 static void nfs4_zap_acl_attr(struct inode *inode)
4491 {
4492 nfs4_set_cached_acl(inode, NULL);
4493 }
4494
4495 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4496 {
4497 struct nfs_inode *nfsi = NFS_I(inode);
4498 struct nfs4_cached_acl *acl;
4499 int ret = -ENOENT;
4500
4501 spin_lock(&inode->i_lock);
4502 acl = nfsi->nfs4_acl;
4503 if (acl == NULL)
4504 goto out;
4505 if (buf == NULL) /* user is just asking for length */
4506 goto out_len;
4507 if (acl->cached == 0)
4508 goto out;
4509 ret = -ERANGE; /* see getxattr(2) man page */
4510 if (acl->len > buflen)
4511 goto out;
4512 memcpy(buf, acl->data, acl->len);
4513 out_len:
4514 ret = acl->len;
4515 out:
4516 spin_unlock(&inode->i_lock);
4517 return ret;
4518 }
4519
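/*
 * Cache the ACL data returned by the server. If it fits in a page we
 * keep the data itself (cached == 1); otherwise we remember only its
 * length, so a later getxattr with a large enough buffer still needs a
 * fresh GETACL.
 */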
4520 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4521 {
4522 struct nfs4_cached_acl *acl;
4523 size_t buflen = sizeof(*acl) + acl_len;
4524
4525 if (buflen <= PAGE_SIZE) {
4526 acl = kmalloc(buflen, GFP_KERNEL);
4527 if (acl == NULL)
4528 goto out;
4529 acl->cached = 1;
4530 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4531 } else {
4532 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4533 if (acl == NULL)
4534 goto out;
4535 acl->cached = 0;
4536 }
4537 acl->len = acl_len;
4538 out:
4539 nfs4_set_cached_acl(inode, acl);
4540 }
4541
4542 /*
4543 * The getxattr API returns the required buffer length when called with a
4544 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4545 * the required buf. On a NULL buf, we send a page of data to the server
4546 * guessing that the ACL request can be serviced by a page. If so, we cache
4547 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
4548 * the cache. If not so, we throw away the page, and cache the required
4549 * length. The next getxattr call will then produce another round trip to
4550 * the server, this time with the input buf of the required size.
4551 */
4552 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4553 {
4554 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4555 struct nfs_getaclargs args = {
4556 .fh = NFS_FH(inode),
4557 .acl_pages = pages,
4558 .acl_len = buflen,
4559 };
4560 struct nfs_getaclres res = {
4561 .acl_len = buflen,
4562 };
4563 struct rpc_message msg = {
4564 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4565 .rpc_argp = &args,
4566 .rpc_resp = &res,
4567 };
4568 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4569 int ret = -ENOMEM, i;
4570
4571 /* As long as we're doing a round trip to the server anyway,
4572 * let's be prepared for a page of acl data. */
4573 if (npages == 0)
4574 npages = 1;
4575 if (npages > ARRAY_SIZE(pages))
4576 return -ERANGE;
4577
4578 for (i = 0; i < npages; i++) {
4579 pages[i] = alloc_page(GFP_KERNEL);
4580 if (!pages[i])
4581 goto out_free;
4582 }
4583
4584 /* for decoding across pages */
4585 res.acl_scratch = alloc_page(GFP_KERNEL);
4586 if (!res.acl_scratch)
4587 goto out_free;
4588
4589 args.acl_len = npages * PAGE_SIZE;
4590 args.acl_pgbase = 0;
4591
4592 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4593 __func__, buf, buflen, npages, args.acl_len);
4594 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4595 &msg, &args.seq_args, &res.seq_res, 0);
4596 if (ret)
4597 goto out_free;
4598
4599 /* Handle the case where the passed-in buffer is too short */
4600 if (res.acl_flags & NFS4_ACL_TRUNC) {
4601 /* Did the user only issue a request for the acl length? */
4602 if (buf == NULL)
4603 goto out_ok;
4604 ret = -ERANGE;
4605 goto out_free;
4606 }
4607 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4608 if (buf) {
4609 if (res.acl_len > buflen) {
4610 ret = -ERANGE;
4611 goto out_free;
4612 }
4613 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4614 }
4615 out_ok:
4616 ret = res.acl_len;
4617 out_free:
4618 for (i = 0; i < npages; i++)
4619 if (pages[i])
4620 __free_page(pages[i]);
4621 if (res.acl_scratch)
4622 __free_page(res.acl_scratch);
4623 return ret;
4624 }
4625
4626 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4627 {
4628 struct nfs4_exception exception = { };
4629 ssize_t ret;
4630 do {
4631 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4632 trace_nfs4_get_acl(inode, ret);
4633 if (ret >= 0)
4634 break;
4635 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4636 } while (exception.retry);
4637 return ret;
4638 }
4639
4640 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4641 {
4642 struct nfs_server *server = NFS_SERVER(inode);
4643 int ret;
4644
4645 if (!nfs4_server_supports_acls(server))
4646 return -EOPNOTSUPP;
4647 ret = nfs_revalidate_inode(server, inode);
4648 if (ret < 0)
4649 return ret;
4650 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4651 nfs_zap_acl_cache(inode);
4652 ret = nfs4_read_cached_acl(inode, buf, buflen);
4653 if (ret != -ENOENT)
4654 /* -ENOENT is returned if there is no ACL or if there is an ACL
4655 * but no cached acl data, just the acl length */
4656 return ret;
4657 return nfs4_get_acl_uncached(inode, buf, buflen);
4658 }
4659
4660 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4661 {
4662 struct nfs_server *server = NFS_SERVER(inode);
4663 struct page *pages[NFS4ACL_MAXPAGES];
4664 struct nfs_setaclargs arg = {
4665 .fh = NFS_FH(inode),
4666 .acl_pages = pages,
4667 .acl_len = buflen,
4668 };
4669 struct nfs_setaclres res;
4670 struct rpc_message msg = {
4671 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4672 .rpc_argp = &arg,
4673 .rpc_resp = &res,
4674 };
4675 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4676 int ret, i;
4677
4678 if (!nfs4_server_supports_acls(server))
4679 return -EOPNOTSUPP;
4680 if (npages > ARRAY_SIZE(pages))
4681 return -ERANGE;
4682 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4683 if (i < 0)
4684 return i;
4685 nfs4_inode_return_delegation(inode);
4686 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4687
4688 /*
4689 * Free each page after tx, so the only ref left is
4690 * held by the network stack
4691 */
4692 for (; i > 0; i--)
4693 put_page(pages[i-1]);
4694
4695 /*
4696 * An ACL update can result in an inode attribute update,
4697 * so mark the attribute cache invalid.
4698 */
4699 spin_lock(&inode->i_lock);
4700 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4701 spin_unlock(&inode->i_lock);
4702 nfs_access_zap_cache(inode);
4703 nfs_zap_acl_cache(inode);
4704 return ret;
4705 }
4706
4707 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4708 {
4709 struct nfs4_exception exception = { };
4710 int err;
4711 do {
4712 err = __nfs4_proc_set_acl(inode, buf, buflen);
4713 trace_nfs4_set_acl(inode, err);
4714 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4715 &exception);
4716 } while (exception.retry);
4717 return err;
4718 }
4719
4720 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4721 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4722 size_t buflen)
4723 {
4724 struct nfs_server *server = NFS_SERVER(inode);
4725 struct nfs_fattr fattr;
4726 struct nfs4_label label = {0, 0, buflen, buf};
4727
4728 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4729 struct nfs4_getattr_arg arg = {
4730 .fh = NFS_FH(inode),
4731 .bitmask = bitmask,
4732 };
4733 struct nfs4_getattr_res res = {
4734 .fattr = &fattr,
4735 .label = &label,
4736 .server = server,
4737 };
4738 struct rpc_message msg = {
4739 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4740 .rpc_argp = &arg,
4741 .rpc_resp = &res,
4742 };
4743 int ret;
4744
4745 nfs_fattr_init(&fattr);
4746
4747 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4748 if (ret)
4749 return ret;
4750 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4751 return -ENOENT;
4752 if (buflen < label.len)
4753 return -ERANGE;
4754 return 0;
4755 }
4756
4757 static int nfs4_get_security_label(struct inode *inode, void *buf,
4758 size_t buflen)
4759 {
4760 struct nfs4_exception exception = { };
4761 int err;
4762
4763 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4764 return -EOPNOTSUPP;
4765
4766 do {
4767 err = _nfs4_get_security_label(inode, buf, buflen);
4768 trace_nfs4_get_security_label(inode, err);
4769 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4770 &exception);
4771 } while (exception.retry);
4772 return err;
4773 }
4774
4775 static int _nfs4_do_set_security_label(struct inode *inode,
4776 struct nfs4_label *ilabel,
4777 struct nfs_fattr *fattr,
4778 struct nfs4_label *olabel)
4779 {
4780
4781 struct iattr sattr = {0};
4782 struct nfs_server *server = NFS_SERVER(inode);
4783 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4784 struct nfs_setattrargs arg = {
4785 .fh = NFS_FH(inode),
4786 .iap = &sattr,
4787 .server = server,
4788 .bitmask = bitmask,
4789 .label = ilabel,
4790 };
4791 struct nfs_setattrres res = {
4792 .fattr = fattr,
4793 .label = olabel,
4794 .server = server,
4795 };
4796 struct rpc_message msg = {
4797 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4798 .rpc_argp = &arg,
4799 .rpc_resp = &res,
4800 };
4801 int status;
4802
4803 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4804
4805 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4806 if (status)
4807 dprintk("%s failed: %d\n", __func__, status);
4808
4809 return status;
4810 }
4811
4812 static int nfs4_do_set_security_label(struct inode *inode,
4813 struct nfs4_label *ilabel,
4814 struct nfs_fattr *fattr,
4815 struct nfs4_label *olabel)
4816 {
4817 struct nfs4_exception exception = { };
4818 int err;
4819
4820 do {
4821 err = _nfs4_do_set_security_label(inode, ilabel,
4822 fattr, olabel);
4823 trace_nfs4_set_security_label(inode, err);
4824 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4825 &exception);
4826 } while (exception.retry);
4827 return err;
4828 }
4829
4830 static int
4831 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4832 {
4833 struct nfs4_label ilabel, *olabel = NULL;
4834 struct nfs_fattr fattr;
4835 struct rpc_cred *cred;
4836 struct inode *inode = d_inode(dentry);
4837 int status;
4838
4839 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4840 return -EOPNOTSUPP;
4841
4842 nfs_fattr_init(&fattr);
4843
4844 ilabel.pi = 0;
4845 ilabel.lfs = 0;
4846 ilabel.label = (char *)buf;
4847 ilabel.len = buflen;
4848
4849 cred = rpc_lookup_cred();
4850 if (IS_ERR(cred))
4851 return PTR_ERR(cred);
4852
4853 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4854 if (IS_ERR(olabel)) {
4855 status = PTR_ERR(olabel); /* PTR_ERR() is already negative */
4856 goto out;
4857 }
4858
4859 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
4860 if (status == 0)
4861 nfs_setsecurity(inode, &fattr, olabel);
4862
4863 nfs4_label_free(olabel);
4864 out:
4865 put_rpccred(cred);
4866 return status;
4867 }
4868 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4869
4870
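/*
 * Common error handling for asynchronous RPCs. Depending on the error,
 * this schedules stateid, lease, migration or session recovery, or just
 * delays and retries (NFS4ERR_DELAY/GRACE). It returns -EAGAIN when the
 * caller should restart the RPC (possibly after sleeping on the state
 * manager queue), and 0 when task->tk_status holds the final, mapped
 * result.
 */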
4871 static int
4872 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4873 struct nfs4_state *state, long *timeout)
4874 {
4875 struct nfs_client *clp = server->nfs_client;
4876
4877 if (task->tk_status >= 0)
4878 return 0;
4879 switch (task->tk_status) {
4880 case -NFS4ERR_DELEG_REVOKED:
4881 case -NFS4ERR_ADMIN_REVOKED:
4882 case -NFS4ERR_BAD_STATEID:
4883 case -NFS4ERR_OPENMODE:
4884 if (state == NULL)
4885 break;
4886 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4887 goto recovery_failed;
4888 goto wait_on_recovery;
4889 case -NFS4ERR_EXPIRED:
4890 if (state != NULL) {
4891 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4892 goto recovery_failed;
4893 }
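/* Fall through to lease recovery */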
4894 case -NFS4ERR_STALE_STATEID:
4895 case -NFS4ERR_STALE_CLIENTID:
4896 nfs4_schedule_lease_recovery(clp);
4897 goto wait_on_recovery;
4898 case -NFS4ERR_MOVED:
4899 if (nfs4_schedule_migration_recovery(server) < 0)
4900 goto recovery_failed;
4901 goto wait_on_recovery;
4902 case -NFS4ERR_LEASE_MOVED:
4903 nfs4_schedule_lease_moved_recovery(clp);
4904 goto wait_on_recovery;
4905 #if defined(CONFIG_NFS_V4_1)
4906 case -NFS4ERR_BADSESSION:
4907 case -NFS4ERR_BADSLOT:
4908 case -NFS4ERR_BAD_HIGH_SLOT:
4909 case -NFS4ERR_DEADSESSION:
4910 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4911 case -NFS4ERR_SEQ_FALSE_RETRY:
4912 case -NFS4ERR_SEQ_MISORDERED:
4913 dprintk("%s ERROR %d, Reset session\n", __func__,
4914 task->tk_status);
4915 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4916 goto wait_on_recovery;
4917 #endif /* CONFIG_NFS_V4_1 */
4918 case -NFS4ERR_DELAY:
4919 nfs_inc_server_stats(server, NFSIOS_DELAY);
4920 rpc_delay(task, nfs4_update_delay(timeout));
4921 goto restart_call;
4922 case -NFS4ERR_GRACE:
4923 rpc_delay(task, NFS4_POLL_RETRY_MAX);
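/* Fall through */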
4924 case -NFS4ERR_RETRY_UNCACHED_REP:
4925 case -NFS4ERR_OLD_STATEID:
4926 goto restart_call;
4927 }
4928 task->tk_status = nfs4_map_errors(task->tk_status);
4929 return 0;
4930 recovery_failed:
4931 task->tk_status = -EIO;
4932 return 0;
4933 wait_on_recovery:
4934 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4935 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4936 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4937 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
4938 goto recovery_failed;
4939 restart_call:
4940 task->tk_status = 0;
4941 return -EAGAIN;
4942 }
4943
4944 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4945 nfs4_verifier *bootverf)
4946 {
4947 __be32 verf[2];
4948
4949 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4950 /* An impossible timestamp guarantees this value
4951 * will never match a generated boot time. */
4952 verf[0] = 0;
4953 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
4954 } else {
4955 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4956 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
4957 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
4958 }
4959 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4960 }
4961
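/*
 * Build the nfs_client_id4 co_ownerid sent in SETCLIENTID when the
 * uniform client string is not in use: the client's IP address plus the
 * server address and transport protocol. If the formatted string no
 * longer fits (the addresses changed between measuring and printing),
 * retry the whole thing once.
 */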
4962 static int
4963 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
4964 {
4965 int result;
4966 size_t len;
4967 char *str;
4968 bool retried = false;
4969
4970 if (clp->cl_owner_id != NULL)
4971 return 0;
4972 retry:
4973 rcu_read_lock();
/* "Linux NFSv4.0 " in the format string below is 14 bytes, not 10 */
4974 len = 14 + strlen(clp->cl_ipaddr) + 1 +
4975 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
4976 1 +
4977 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
4978 1;
4979 rcu_read_unlock();
4980
4981 if (len > NFS4_OPAQUE_LIMIT + 1)
4982 return -EINVAL;
4983
4984 /*
4985 * Since this string is allocated at mount time, and held until the
4986 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
4987 * about a memory-reclaim deadlock.
4988 */
4989 str = kmalloc(len, GFP_KERNEL);
4990 if (!str)
4991 return -ENOMEM;
4992
4993 rcu_read_lock();
4994 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
4995 clp->cl_ipaddr,
4996 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
4997 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
4998 rcu_read_unlock();
4999
5000 /* Did the formatted string outgrow the buffer (addresses changed under us)? Retry once. */
5001 if (result >= len) {
5002 kfree(str);
5003 if (retried)
5004 return -EINVAL;
5005 retried = true;
5006 goto retry;
5007 }
5008 clp->cl_owner_id = str;
5009 return 0;
5010 }
5011
5012 static int
5013 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5014 {
5015 int result;
5016 size_t len;
5017 char *str;
5018
5019 len = 10 + 10 + 1 + 10 + 1 +
5020 strlen(nfs4_client_id_uniquifier) + 1 +
5021 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5022
5023 if (len > NFS4_OPAQUE_LIMIT + 1)
5024 return -EINVAL;
5025
5026 /*
5027 * Since this string is allocated at mount time, and held until the
5028 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5029 * about a memory-reclaim deadlock.
5030 */
5031 str = kmalloc(len, GFP_KERNEL);
5032 if (!str)
5033 return -ENOMEM;
5034
5035 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5036 clp->rpc_ops->version, clp->cl_minorversion,
5037 nfs4_client_id_uniquifier,
5038 clp->cl_rpcclient->cl_nodename);
5039 if (result >= len) {
5040 kfree(str);
5041 return -EINVAL;
5042 }
5043 clp->cl_owner_id = str;
5044 return 0;
5045 }
5046
5047 static int
5048 nfs4_init_uniform_client_string(struct nfs_client *clp)
5049 {
5050 int result;
5051 size_t len;
5052 char *str;
5053
5054 if (clp->cl_owner_id != NULL)
5055 return 0;
5056
5057 if (nfs4_client_id_uniquifier[0] != '\0')
5058 return nfs4_init_uniquifier_client_string(clp);
5059
5060 len = 10 + 10 + 1 + 10 + 1 +
5061 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5062
5063 if (len > NFS4_OPAQUE_LIMIT + 1)
5064 return -EINVAL;
5065
5066 /*
5067 * Since this string is allocated at mount time, and held until the
5068 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5069 * about a memory-reclaim deadlock.
5070 */
5071 str = kmalloc(len, GFP_KERNEL);
5072 if (!str)
5073 return -ENOMEM;
5074
5075 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5076 clp->rpc_ops->version, clp->cl_minorversion,
5077 clp->cl_rpcclient->cl_nodename);
5078 if (result >= len) {
5079 kfree(str);
5080 return -EINVAL;
5081 }
5082 clp->cl_owner_id = str;
5083 return 0;
5084 }
5085
5086 /*
5087 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5088 * services. Advertise one based on the address family of the
5089 * clientaddr.
5090 */
5091 static unsigned int
5092 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5093 {
5094 if (strchr(clp->cl_ipaddr, ':') != NULL)
5095 return scnprintf(buf, len, "tcp6");
5096 else
5097 return scnprintf(buf, len, "tcp");
5098 }
5099
5100 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5101 {
5102 struct nfs4_setclientid *sc = calldata;
5103
5104 if (task->tk_status == 0)
5105 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5106 }
5107
5108 static const struct rpc_call_ops nfs4_setclientid_ops = {
5109 .rpc_call_done = nfs4_setclientid_done,
5110 };
5111
5112 /**
5113 * nfs4_proc_setclientid - Negotiate client ID
5114 * @clp: state data structure
5115 * @program: RPC program for NFSv4 callback service
5116 * @port: IP port number for NFS4 callback service
5117 * @cred: RPC credential to use for this call
5118 * @res: where to place the result
5119 *
5120 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5121 */
5122 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5123 unsigned short port, struct rpc_cred *cred,
5124 struct nfs4_setclientid_res *res)
5125 {
5126 nfs4_verifier sc_verifier;
5127 struct nfs4_setclientid setclientid = {
5128 .sc_verifier = &sc_verifier,
5129 .sc_prog = program,
5130 .sc_clnt = clp,
5131 };
5132 struct rpc_message msg = {
5133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5134 .rpc_argp = &setclientid,
5135 .rpc_resp = res,
5136 .rpc_cred = cred,
5137 };
5138 struct rpc_task *task;
5139 struct rpc_task_setup task_setup_data = {
5140 .rpc_client = clp->cl_rpcclient,
5141 .rpc_message = &msg,
5142 .callback_ops = &nfs4_setclientid_ops,
5143 .callback_data = &setclientid,
5144 .flags = RPC_TASK_TIMEOUT,
5145 };
5146 int status;
5147
5148 /* nfs_client_id4 */
5149 nfs4_init_boot_verifier(clp, &sc_verifier);
5150
5151 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5152 status = nfs4_init_uniform_client_string(clp);
5153 else
5154 status = nfs4_init_nonuniform_client_string(clp);
5155
5156 if (status)
5157 goto out;
5158
5159 /* cb_client4 */
5160 setclientid.sc_netid_len =
5161 nfs4_init_callback_netid(clp,
5162 setclientid.sc_netid,
5163 sizeof(setclientid.sc_netid));
5164 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5165 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5166 clp->cl_ipaddr, port >> 8, port & 255);
5167
5168 dprintk("NFS call setclientid auth=%s, '%s'\n",
5169 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5170 clp->cl_owner_id);
5171 task = rpc_run_task(&task_setup_data);
5172 if (IS_ERR(task)) {
5173 status = PTR_ERR(task);
5174 goto out;
5175 }
5176 status = task->tk_status;
5177 if (setclientid.sc_cred) {
5178 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5179 put_rpccred(setclientid.sc_cred);
5180 }
5181 rpc_put_task(task);
5182 out:
5183 trace_nfs4_setclientid(clp, status);
5184 dprintk("NFS reply setclientid: %d\n", status);
5185 return status;
5186 }
5187
5188 /**
5189 * nfs4_proc_setclientid_confirm - Confirm client ID
5190 * @clp: state data structure
5191 * @arg: result of a previous SETCLIENTID
5192 * @cred: RPC credential to use for this call
5193 *
5194 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5195 */
5196 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5197 struct nfs4_setclientid_res *arg,
5198 struct rpc_cred *cred)
5199 {
5200 struct rpc_message msg = {
5201 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5202 .rpc_argp = arg,
5203 .rpc_cred = cred,
5204 };
5205 int status;
5206
5207 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5208 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5209 clp->cl_clientid);
5210 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5211 trace_nfs4_setclientid_confirm(clp, status);
5212 dprintk("NFS reply setclientid_confirm: %d\n", status);
5213 return status;
5214 }
5215
5216 struct nfs4_delegreturndata {
5217 struct nfs4_delegreturnargs args;
5218 struct nfs4_delegreturnres res;
5219 struct nfs_fh fh;
5220 nfs4_stateid stateid;
5221 unsigned long timestamp;
5222 struct nfs_fattr fattr;
5223 int rpc_status;
5224 struct inode *inode;
5225 bool roc;
5226 u32 roc_barrier;
5227 };
5228
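/*
 * DELEGRETURN completion: errors that mean the stateid (and hence the
 * delegation) is already gone are treated as success, since there is
 * nothing left to return; anything else goes through the usual async
 * error handler and may restart the call.
 */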
5229 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5230 {
5231 struct nfs4_delegreturndata *data = calldata;
5232
5233 if (!nfs4_sequence_done(task, &data->res.seq_res))
5234 return;
5235
5236 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5237 switch (task->tk_status) {
5238 case 0:
5239 renew_lease(data->res.server, data->timestamp);
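/* Fall through */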
5240 case -NFS4ERR_ADMIN_REVOKED:
5241 case -NFS4ERR_DELEG_REVOKED:
5242 case -NFS4ERR_BAD_STATEID:
5243 case -NFS4ERR_OLD_STATEID:
5244 case -NFS4ERR_STALE_STATEID:
5245 case -NFS4ERR_EXPIRED:
5246 task->tk_status = 0;
5247 if (data->roc)
5248 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5249 break;
5250 default:
5251 if (nfs4_async_handle_error(task, data->res.server,
5252 NULL, NULL) == -EAGAIN) {
5253 rpc_restart_call_prepare(task);
5254 return;
5255 }
5256 }
5257 data->rpc_status = task->tk_status;
5258 }
5259
5260 static void nfs4_delegreturn_release(void *calldata)
5261 {
5262 struct nfs4_delegreturndata *data = calldata;
5263 struct inode *inode = data->inode;
5264
5265 if (inode) {
5266 if (data->roc)
5267 pnfs_roc_release(inode);
5268 nfs_iput_and_deactive(inode);
5269 }
5270 kfree(calldata);
5271 }
5272
5273 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5274 {
5275 struct nfs4_delegreturndata *d_data;
5276
5277 d_data = (struct nfs4_delegreturndata *)data;
5278
5279 if (d_data->roc &&
5280 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task))
5281 return;
5282
5283 nfs4_setup_sequence(d_data->res.server,
5284 &d_data->args.seq_args,
5285 &d_data->res.seq_res,
5286 task);
5287 }
5288
5289 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5290 .rpc_call_prepare = nfs4_delegreturn_prepare,
5291 .rpc_call_done = nfs4_delegreturn_done,
5292 .rpc_release = nfs4_delegreturn_release,
5293 };
5294
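/*
 * Issue DELEGRETURN as an asynchronous RPC. When issync is set, wait
 * for completion and fold the post-op attributes back into the inode;
 * the pNFS return-on-close barrier is handled in the prepare/done
 * callbacks above.
 */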
5295 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5296 {
5297 struct nfs4_delegreturndata *data;
5298 struct nfs_server *server = NFS_SERVER(inode);
5299 struct rpc_task *task;
5300 struct rpc_message msg = {
5301 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5302 .rpc_cred = cred,
5303 };
5304 struct rpc_task_setup task_setup_data = {
5305 .rpc_client = server->client,
5306 .rpc_message = &msg,
5307 .callback_ops = &nfs4_delegreturn_ops,
5308 .flags = RPC_TASK_ASYNC,
5309 };
5310 int status = 0;
5311
5312 data = kzalloc(sizeof(*data), GFP_NOFS);
5313 if (data == NULL)
5314 return -ENOMEM;
5315 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5316 data->args.fhandle = &data->fh;
5317 data->args.stateid = &data->stateid;
5318 data->args.bitmask = server->cache_consistency_bitmask;
5319 nfs_copy_fh(&data->fh, NFS_FH(inode));
5320 nfs4_stateid_copy(&data->stateid, stateid);
5321 data->res.fattr = &data->fattr;
5322 data->res.server = server;
5323 nfs_fattr_init(data->res.fattr);
5324 data->timestamp = jiffies;
5325 data->rpc_status = 0;
5326 data->inode = nfs_igrab_and_active(inode);
5327 if (data->inode)
5328 data->roc = nfs4_roc(inode);
5329
5330 task_setup_data.callback_data = data;
5331 msg.rpc_argp = &data->args;
5332 msg.rpc_resp = &data->res;
5333 task = rpc_run_task(&task_setup_data);
5334 if (IS_ERR(task))
5335 return PTR_ERR(task);
5336 if (!issync)
5337 goto out;
5338 status = nfs4_wait_for_completion_rpc_task(task);
5339 if (status != 0)
5340 goto out;
5341 status = data->rpc_status;
5342 if (status == 0)
5343 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5344 else
5345 nfs_refresh_inode(inode, &data->fattr);
5346 out:
5347 rpc_put_task(task);
5348 return status;
5349 }
5350
5351 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5352 {
5353 struct nfs_server *server = NFS_SERVER(inode);
5354 struct nfs4_exception exception = { };
5355 int err;
5356 do {
5357 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5358 trace_nfs4_delegreturn(inode, err);
5359 switch (err) {
5360 case -NFS4ERR_STALE_STATEID:
5361 case -NFS4ERR_EXPIRED:
5362 case 0:
5363 return 0;
5364 }
5365 err = nfs4_handle_exception(server, err, &exception);
5366 } while (exception.retry);
5367 return err;
5368 }
5369
5370 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5371 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5372
5373 /*
5374 * sleep, with exponential backoff, and retry the LOCK operation.
5375 */
5376 static unsigned long
5377 nfs4_set_lock_task_retry(unsigned long timeout)
5378 {
5379 freezable_schedule_timeout_killable_unsafe(timeout);
5380 timeout <<= 1;
5381 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5382 return NFS4_LOCK_MAXTIMEOUT;
5383 return timeout;
5384 }
5385
5386 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5387 {
5388 struct inode *inode = state->inode;
5389 struct nfs_server *server = NFS_SERVER(inode);
5390 struct nfs_client *clp = server->nfs_client;
5391 struct nfs_lockt_args arg = {
5392 .fh = NFS_FH(inode),
5393 .fl = request,
5394 };
5395 struct nfs_lockt_res res = {
5396 .denied = request,
5397 };
5398 struct rpc_message msg = {
5399 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5400 .rpc_argp = &arg,
5401 .rpc_resp = &res,
5402 .rpc_cred = state->owner->so_cred,
5403 };
5404 struct nfs4_lock_state *lsp;
5405 int status;
5406
5407 arg.lock_owner.clientid = clp->cl_clientid;
5408 status = nfs4_set_lock_state(state, request);
5409 if (status != 0)
5410 goto out;
5411 lsp = request->fl_u.nfs4_fl.owner;
5412 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5413 arg.lock_owner.s_dev = server->s_dev;
5414 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5415 switch (status) {
5416 case 0:
5417 request->fl_type = F_UNLCK;
5418 break;
5419 case -NFS4ERR_DENIED:
5420 status = 0;
5421 }
5422 request->fl_ops->fl_release_private(request);
5423 request->fl_ops = NULL;
5424 out:
5425 return status;
5426 }
5427
5428 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5429 {
5430 struct nfs4_exception exception = { };
5431 int err;
5432
5433 do {
5434 err = _nfs4_proc_getlk(state, cmd, request);
5435 trace_nfs4_get_lock(request, state, cmd, err);
5436 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5437 &exception);
5438 } while (exception.retry);
5439 return err;
5440 }
5441
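/* Hand the lock request to the local VFS lock manager (POSIX or flock). */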
5442 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5443 {
5444 int res = 0;
5445 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5446 case FL_POSIX:
5447 res = posix_lock_inode_wait(inode, fl);
5448 break;
5449 case FL_FLOCK:
5450 res = flock_lock_inode_wait(inode, fl);
5451 break;
5452 default:
5453 BUG();
5454 }
5455 return res;
5456 }
5457
5458 struct nfs4_unlockdata {
5459 struct nfs_locku_args arg;
5460 struct nfs_locku_res res;
5461 struct nfs4_lock_state *lsp;
5462 struct nfs_open_context *ctx;
5463 struct file_lock fl;
5464 const struct nfs_server *server;
5465 unsigned long timestamp;
5466 };
5467
5468 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5469 struct nfs_open_context *ctx,
5470 struct nfs4_lock_state *lsp,
5471 struct nfs_seqid *seqid)
5472 {
5473 struct nfs4_unlockdata *p;
5474 struct inode *inode = lsp->ls_state->inode;
5475
5476 p = kzalloc(sizeof(*p), GFP_NOFS);
5477 if (p == NULL)
5478 return NULL;
5479 p->arg.fh = NFS_FH(inode);
5480 p->arg.fl = &p->fl;
5481 p->arg.seqid = seqid;
5482 p->res.seqid = seqid;
5483 p->lsp = lsp;
5484 atomic_inc(&lsp->ls_count);
5485 /* Ensure we don't close the file until we're done freeing locks! */
5486 p->ctx = get_nfs_open_context(ctx);
5487 memcpy(&p->fl, fl, sizeof(p->fl));
5488 p->server = NFS_SERVER(inode);
5489 return p;
5490 }
5491
5492 static void nfs4_locku_release_calldata(void *data)
5493 {
5494 struct nfs4_unlockdata *calldata = data;
5495 nfs_free_seqid(calldata->arg.seqid);
5496 nfs4_put_lock_state(calldata->lsp);
5497 put_nfs_open_context(calldata->ctx);
5498 kfree(calldata);
5499 }
5500
5501 static void nfs4_locku_done(struct rpc_task *task, void *data)
5502 {
5503 struct nfs4_unlockdata *calldata = data;
5504
5505 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5506 return;
5507 switch (task->tk_status) {
5508 case 0:
5509 renew_lease(calldata->server, calldata->timestamp);
5510 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5511 if (nfs4_update_lock_stateid(calldata->lsp,
5512 &calldata->res.stateid))
5513 break;
5514 case -NFS4ERR_BAD_STATEID:
5515 case -NFS4ERR_OLD_STATEID:
5516 case -NFS4ERR_STALE_STATEID:
5517 case -NFS4ERR_EXPIRED:
5518 if (!nfs4_stateid_match(&calldata->arg.stateid,
5519 &calldata->lsp->ls_stateid))
5520 rpc_restart_call_prepare(task);
5521 break;
5522 default:
5523 if (nfs4_async_handle_error(task, calldata->server,
5524 NULL, NULL) == -EAGAIN)
5525 rpc_restart_call_prepare(task);
5526 }
5527 nfs_release_seqid(calldata->arg.seqid);
5528 }
5529
5530 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5531 {
5532 struct nfs4_unlockdata *calldata = data;
5533
5534 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5535 goto out_wait;
5536 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5537 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5538 /* Note: exit _without_ running nfs4_locku_done */
5539 goto out_no_action;
5540 }
5541 calldata->timestamp = jiffies;
5542 if (nfs4_setup_sequence(calldata->server,
5543 &calldata->arg.seq_args,
5544 &calldata->res.seq_res,
5545 task) != 0)
5546 nfs_release_seqid(calldata->arg.seqid);
5547 return;
5548 out_no_action:
5549 task->tk_action = NULL;
5550 out_wait:
5551 nfs4_sequence_done(task, &calldata->res.seq_res);
5552 }
5553
5554 static const struct rpc_call_ops nfs4_locku_ops = {
5555 .rpc_call_prepare = nfs4_locku_prepare,
5556 .rpc_call_done = nfs4_locku_done,
5557 .rpc_release = nfs4_locku_release_calldata,
5558 };
5559
5560 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5561 struct nfs_open_context *ctx,
5562 struct nfs4_lock_state *lsp,
5563 struct nfs_seqid *seqid)
5564 {
5565 struct nfs4_unlockdata *data;
5566 struct rpc_message msg = {
5567 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5568 .rpc_cred = ctx->cred,
5569 };
5570 struct rpc_task_setup task_setup_data = {
5571 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5572 .rpc_message = &msg,
5573 .callback_ops = &nfs4_locku_ops,
5574 .workqueue = nfsiod_workqueue,
5575 .flags = RPC_TASK_ASYNC,
5576 };
5577
5578 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5579 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5580
5581 /* Ensure this is an unlock - when canceling a lock, the
5582 * canceled lock is passed in, and it won't be an unlock.
5583 */
5584 fl->fl_type = F_UNLCK;
5585
5586 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5587 if (data == NULL) {
5588 nfs_free_seqid(seqid);
5589 return ERR_PTR(-ENOMEM);
5590 }
5591
5592 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5593 msg.rpc_argp = &data->arg;
5594 msg.rpc_resp = &data->res;
5595 task_setup_data.callback_data = data;
5596 return rpc_run_task(&task_setup_data);
5597 }
5598
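/*
 * Handle an unlock request from the VFS. The lock is always released
 * locally first; a LOCKU RPC is sent only if the lock state has been
 * initialized on the server, i.e. the lock is not purely delegated.
 */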
5599 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5600 {
5601 struct inode *inode = state->inode;
5602 struct nfs4_state_owner *sp = state->owner;
5603 struct nfs_inode *nfsi = NFS_I(inode);
5604 struct nfs_seqid *seqid;
5605 struct nfs4_lock_state *lsp;
5606 struct rpc_task *task;
5607 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5608 int status = 0;
5609 unsigned char fl_flags = request->fl_flags;
5610
5611 status = nfs4_set_lock_state(state, request);
5612 /* Unlock _before_ we do the RPC call */
5613 request->fl_flags |= FL_EXISTS;
5614 /* Exclude nfs_delegation_claim_locks() */
5615 mutex_lock(&sp->so_delegreturn_mutex);
5616 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5617 down_read(&nfsi->rwsem);
5618 if (do_vfs_lock(inode, request) == -ENOENT) {
5619 up_read(&nfsi->rwsem);
5620 mutex_unlock(&sp->so_delegreturn_mutex);
5621 goto out;
5622 }
5623 up_read(&nfsi->rwsem);
5624 mutex_unlock(&sp->so_delegreturn_mutex);
5625 if (status != 0)
5626 goto out;
5627 /* Is this a delegated lock? */
5628 lsp = request->fl_u.nfs4_fl.owner;
5629 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5630 goto out;
5631 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5632 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5633 status = -ENOMEM;
5634 if (IS_ERR(seqid))
5635 goto out;
5636 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5637 status = PTR_ERR(task);
5638 if (IS_ERR(task))
5639 goto out;
5640 status = nfs4_wait_for_completion_rpc_task(task);
5641 rpc_put_task(task);
5642 out:
5643 request->fl_flags = fl_flags;
5644 trace_nfs4_unlock(request, state, F_SETLK, status);
5645 return status;
5646 }
5647
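/*
 * Per-call data for an asynchronous LOCK request. Both an open seqid and a
 * lock seqid are allocated up front, because the request may have to
 * perform an open_to_lock_owner if this lock owner does not yet have a
 * lock stateid.
 */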
5648 struct nfs4_lockdata {
5649 struct nfs_lock_args arg;
5650 struct nfs_lock_res res;
5651 struct nfs4_lock_state *lsp;
5652 struct nfs_open_context *ctx;
5653 struct file_lock fl;
5654 unsigned long timestamp;
5655 int rpc_status;
5656 int cancelled;
5657 struct nfs_server *server;
5658 };
5659
5660 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5661 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5662 gfp_t gfp_mask)
5663 {
5664 struct nfs4_lockdata *p;
5665 struct inode *inode = lsp->ls_state->inode;
5666 struct nfs_server *server = NFS_SERVER(inode);
5667 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5668
5669 p = kzalloc(sizeof(*p), gfp_mask);
5670 if (p == NULL)
5671 return NULL;
5672
5673 p->arg.fh = NFS_FH(inode);
5674 p->arg.fl = &p->fl;
5675 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5676 if (IS_ERR(p->arg.open_seqid))
5677 goto out_free;
5678 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5679 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5680 if (IS_ERR(p->arg.lock_seqid))
5681 goto out_free_seqid;
5682 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5683 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5684 p->arg.lock_owner.s_dev = server->s_dev;
5685 p->res.lock_seqid = p->arg.lock_seqid;
5686 p->lsp = lsp;
5687 p->server = server;
5688 atomic_inc(&lsp->ls_count);
5689 p->ctx = get_nfs_open_context(ctx);
5690 get_file(fl->fl_file);
5691 memcpy(&p->fl, fl, sizeof(p->fl));
5692 return p;
5693 out_free_seqid:
5694 nfs_free_seqid(p->arg.open_seqid);
5695 out_free:
5696 kfree(p);
5697 return NULL;
5698 }
5699
5700 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5701 {
5702 struct nfs4_lockdata *data = calldata;
5703 struct nfs4_state *state = data->lsp->ls_state;
5704
5705 dprintk("%s: begin!\n", __func__);
5706 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5707 goto out_wait;
5708 /* Do we need to do an open_to_lock_owner? */
5709 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5710 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5711 goto out_release_lock_seqid;
5712 }
5713 nfs4_stateid_copy(&data->arg.open_stateid,
5714 &state->open_stateid);
5715 data->arg.new_lock_owner = 1;
5716 data->res.open_seqid = data->arg.open_seqid;
5717 } else {
5718 data->arg.new_lock_owner = 0;
5719 nfs4_stateid_copy(&data->arg.lock_stateid,
5720 &data->lsp->ls_stateid);
5721 }
5722 if (!nfs4_valid_open_stateid(state)) {
5723 data->rpc_status = -EBADF;
5724 task->tk_action = NULL;
5725 goto out_release_open_seqid;
5726 }
5727 data->timestamp = jiffies;
5728 if (nfs4_setup_sequence(data->server,
5729 &data->arg.seq_args,
5730 &data->res.seq_res,
5731 task) == 0)
5732 return;
5733 out_release_open_seqid:
5734 nfs_release_seqid(data->arg.open_seqid);
5735 out_release_lock_seqid:
5736 nfs_release_seqid(data->arg.lock_seqid);
5737 out_wait:
5738 nfs4_sequence_done(task, &data->res.seq_res);
5739 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5740 }
5741
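/*
 * LOCK completion: on success, renew the lease, acquire a new lock locally,
 * and either record the initial lock stateid (new lock owner) or update the
 * existing one. Stateid errors restart the call only when the open or lock
 * stateid has changed since the request was sent.
 */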
5742 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5743 {
5744 struct nfs4_lockdata *data = calldata;
5745 struct nfs4_lock_state *lsp = data->lsp;
5746
5747 dprintk("%s: begin!\n", __func__);
5748
5749 if (!nfs4_sequence_done(task, &data->res.seq_res))
5750 return;
5751
5752 data->rpc_status = task->tk_status;
5753 switch (task->tk_status) {
5754 case 0:
5755 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5756 data->timestamp);
5757 if (data->arg.new_lock) {
5758 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5759 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5760 rpc_restart_call_prepare(task);
5761 break;
5762 }
5763 }
5764 if (data->arg.new_lock_owner != 0) {
5765 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5766 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5767 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5768 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5769 rpc_restart_call_prepare(task);
5770 break;
5771 case -NFS4ERR_BAD_STATEID:
5772 case -NFS4ERR_OLD_STATEID:
5773 case -NFS4ERR_STALE_STATEID:
5774 case -NFS4ERR_EXPIRED:
5775 if (data->arg.new_lock_owner != 0) {
5776 if (!nfs4_stateid_match(&data->arg.open_stateid,
5777 &lsp->ls_state->open_stateid))
5778 rpc_restart_call_prepare(task);
5779 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5780 &lsp->ls_stateid))
5781 rpc_restart_call_prepare(task);
5782 }
5783 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5784 }
5785
5786 static void nfs4_lock_release(void *calldata)
5787 {
5788 struct nfs4_lockdata *data = calldata;
5789
5790 dprintk("%s: begin!\n", __func__);
5791 nfs_free_seqid(data->arg.open_seqid);
5792 if (data->cancelled != 0) {
5793 struct rpc_task *task;
5794 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5795 data->arg.lock_seqid);
5796 if (!IS_ERR(task))
5797 rpc_put_task_async(task);
5798 dprintk("%s: cancelling lock!\n", __func__);
5799 } else
5800 nfs_free_seqid(data->arg.lock_seqid);
5801 nfs4_put_lock_state(data->lsp);
5802 put_nfs_open_context(data->ctx);
5803 fput(data->fl.fl_file);
5804 kfree(data);
5805 dprintk("%s: done!\n", __func__);
5806 }
5807
5808 static const struct rpc_call_ops nfs4_lock_ops = {
5809 .rpc_call_prepare = nfs4_lock_prepare,
5810 .rpc_call_done = nfs4_lock_done,
5811 .rpc_release = nfs4_lock_release,
5812 };
5813
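/*
 * Schedule the appropriate recovery for a failed SETLK: stateid recovery
 * for revoked or bad stateids, lease recovery for a stale or expired
 * client lease.
 */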
5814 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5815 {
5816 switch (error) {
5817 case -NFS4ERR_ADMIN_REVOKED:
5818 case -NFS4ERR_BAD_STATEID:
5819 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5820 if (new_lock_owner != 0 ||
5821 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5822 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5823 break;
5824 case -NFS4ERR_STALE_STATEID:
5825 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5826 case -NFS4ERR_EXPIRED:
5827 nfs4_schedule_lease_recovery(server->nfs_client);
5828 }
5829 }
5830
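/*
 * Send a LOCK request and wait for it to complete. New locks are flagged
 * via arg.new_lock, while reclaim and recovery requests use a privileged
 * sequence slot. If the wait is interrupted, the request is marked
 * cancelled and nfs4_lock_release() issues a matching LOCKU.
 */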
5831 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5832 {
5833 struct nfs4_lockdata *data;
5834 struct rpc_task *task;
5835 struct rpc_message msg = {
5836 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5837 .rpc_cred = state->owner->so_cred,
5838 };
5839 struct rpc_task_setup task_setup_data = {
5840 .rpc_client = NFS_CLIENT(state->inode),
5841 .rpc_message = &msg,
5842 .callback_ops = &nfs4_lock_ops,
5843 .workqueue = nfsiod_workqueue,
5844 .flags = RPC_TASK_ASYNC,
5845 };
5846 int ret;
5847
5848 dprintk("%s: begin!\n", __func__);
5849 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5850 fl->fl_u.nfs4_fl.owner,
5851 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5852 if (data == NULL)
5853 return -ENOMEM;
5854 if (IS_SETLKW(cmd))
5855 data->arg.block = 1;
5856 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5857 msg.rpc_argp = &data->arg;
5858 msg.rpc_resp = &data->res;
5859 task_setup_data.callback_data = data;
5860 if (recovery_type > NFS_LOCK_NEW) {
5861 if (recovery_type == NFS_LOCK_RECLAIM)
5862 data->arg.reclaim = NFS_LOCK_RECLAIM;
5863 nfs4_set_sequence_privileged(&data->arg.seq_args);
5864 } else
5865 data->arg.new_lock = 1;
5866 task = rpc_run_task(&task_setup_data);
5867 if (IS_ERR(task))
5868 return PTR_ERR(task);
5869 ret = nfs4_wait_for_completion_rpc_task(task);
5870 if (ret == 0) {
5871 ret = data->rpc_status;
5872 if (ret)
5873 nfs4_handle_setlk_error(data->server, data->lsp,
5874 data->arg.new_lock_owner, ret);
5875 } else
5876 data->cancelled = 1;
5877 rpc_put_task(task);
5878 dprintk("%s: done, ret = %d!\n", __func__, ret);
5879 return ret;
5880 }
5881
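/*
 * Reclaim an existing lock during state recovery. If the state is still
 * delegated, the lock remains cached locally and no RPC is needed.
 */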
5882 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5883 {
5884 struct nfs_server *server = NFS_SERVER(state->inode);
5885 struct nfs4_exception exception = {
5886 .inode = state->inode,
5887 };
5888 int err;
5889
5890 do {
5891 /* Cache the lock if possible... */
5892 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5893 return 0;
5894 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5895 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5896 if (err != -NFS4ERR_DELAY)
5897 break;
5898 nfs4_handle_exception(server, err, &exception);
5899 } while (exception.retry);
5900 return err;
5901 }
5902
5903 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5904 {
5905 struct nfs_server *server = NFS_SERVER(state->inode);
5906 struct nfs4_exception exception = {
5907 .inode = state->inode,
5908 };
5909 int err;
5910
5911 err = nfs4_set_lock_state(state, request);
5912 if (err != 0)
5913 return err;
5914 if (!recover_lost_locks) {
5915 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5916 return 0;
5917 }
5918 do {
5919 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5920 return 0;
5921 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5922 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5923 switch (err) {
5924 default:
5925 goto out;
5926 case -NFS4ERR_GRACE:
5927 case -NFS4ERR_DELAY:
5928 nfs4_handle_exception(server, err, &exception);
5929 err = 0;
5930 }
5931 } while (exception.retry);
5932 out:
5933 return err;
5934 }
5935
5936 #if defined(CONFIG_NFS_V4_1)
5937 /**
5938 * nfs41_check_expired_locks - possibly free a lock stateid
5939 *
5940 * @state: NFSv4 state for an inode
5941 *
5942 * Returns NFS_OK if recovery for this stateid is now finished.
5943 * Otherwise a negative NFS4ERR value is returned.
5944 */
5945 static int nfs41_check_expired_locks(struct nfs4_state *state)
5946 {
5947 int status, ret = -NFS4ERR_BAD_STATEID;
5948 struct nfs4_lock_state *lsp;
5949 struct nfs_server *server = NFS_SERVER(state->inode);
5950
5951 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5952 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5953 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
5954
5955 status = nfs41_test_stateid(server,
5956 &lsp->ls_stateid,
5957 cred);
5958 trace_nfs4_test_lock_stateid(state, lsp, status);
5959 if (status != NFS_OK) {
5960 /* Free the stateid unless the server
5961 * informs us the stateid is unrecognized. */
5962 if (status != -NFS4ERR_BAD_STATEID)
5963 nfs41_free_stateid(server,
5964 &lsp->ls_stateid,
5965 cred);
5966 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5967 ret = status;
5968 }
5969 }
5970 }
5971
5972 return ret;
5973 }
5974
5975 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5976 {
5977 int status = NFS_OK;
5978
5979 if (test_bit(LK_STATE_IN_USE, &state->flags))
5980 status = nfs41_check_expired_locks(state);
5981 if (status != NFS_OK)
5982 status = nfs4_lock_expired(state, request);
5983 return status;
5984 }
5985 #endif
5986
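/*
 * Set a lock. If the open state is delegated, the lock can be cached
 * locally; otherwise a LOCK request goes on the wire. The FL_ACCESS probe
 * checks for local conflicts before any RPC is issued.
 */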
5987 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5988 {
5989 struct nfs_inode *nfsi = NFS_I(state->inode);
5990 unsigned char fl_flags = request->fl_flags;
5991 int status = -ENOLCK;
5992
5993 if ((fl_flags & FL_POSIX) &&
5994 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
5995 goto out;
5996 /* Is this a delegated open? */
5997 status = nfs4_set_lock_state(state, request);
5998 if (status != 0)
5999 goto out;
6000 request->fl_flags |= FL_ACCESS;
6001 status = do_vfs_lock(state->inode, request);
6002 if (status < 0)
6003 goto out;
6004 down_read(&nfsi->rwsem);
6005 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6006 /* Yes: cache locks! */
6007 /* ...but avoid races with delegation recall... */
6008 request->fl_flags = fl_flags & ~FL_SLEEP;
6009 status = do_vfs_lock(state->inode, request);
6010 up_read(&nfsi->rwsem);
6011 goto out;
6012 }
6013 up_read(&nfsi->rwsem);
6014 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6015 out:
6016 request->fl_flags = fl_flags;
6017 return status;
6018 }
6019
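/*
 * Retry wrapper around _nfs4_proc_setlk(): NFS4ERR_DENIED is mapped to
 * -EAGAIN and the usual exception handling loop is applied.
 */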
6020 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6021 {
6022 struct nfs4_exception exception = {
6023 .state = state,
6024 .inode = state->inode,
6025 };
6026 int err;
6027
6028 do {
6029 err = _nfs4_proc_setlk(state, cmd, request);
6030 trace_nfs4_set_lock(request, state, cmd, err);
6031 if (err == -NFS4ERR_DENIED)
6032 err = -EAGAIN;
6033 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6034 err, &exception);
6035 } while (exception.retry);
6036 return err;
6037 }
6038
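/*
 * Entry point for the NFSv4 lock method: dispatch GETLK, unlock and setlk
 * requests, and retry a blocking SETLKW request after a delay when the
 * lock is denied.
 */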
6039 static int
6040 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6041 {
6042 struct nfs_open_context *ctx;
6043 struct nfs4_state *state;
6044 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6045 int status;
6046
6047 /* verify open state */
6048 ctx = nfs_file_open_context(filp);
6049 state = ctx->state;
6050
6051 if (request->fl_start < 0 || request->fl_end < 0)
6052 return -EINVAL;
6053
6054 if (IS_GETLK(cmd)) {
6055 if (state != NULL)
6056 return nfs4_proc_getlk(state, F_GETLK, request);
6057 return 0;
6058 }
6059
6060 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6061 return -EINVAL;
6062
6063 if (request->fl_type == F_UNLCK) {
6064 if (state != NULL)
6065 return nfs4_proc_unlck(state, cmd, request);
6066 return 0;
6067 }
6068
6069 if (state == NULL)
6070 return -ENOLCK;
6071 /*
6072 * Don't rely on the VFS having checked the file open mode,
6073 * since it won't do this for flock() locks.
6074 */
6075 switch (request->fl_type) {
6076 case F_RDLCK:
6077 if (!(filp->f_mode & FMODE_READ))
6078 return -EBADF;
6079 break;
6080 case F_WRLCK:
6081 if (!(filp->f_mode & FMODE_WRITE))
6082 return -EBADF;
6083 }
6084
6085 do {
6086 status = nfs4_proc_setlk(state, cmd, request);
6087 if ((status != -EAGAIN) || IS_SETLK(cmd))
6088 break;
6089 timeout = nfs4_set_lock_task_retry(timeout);
6090 status = -ERESTARTSYS;
6091 if (signalled())
6092 break;
6093 } while(status < 0);
6094 return status;
6095 }
6096
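/*
 * Push a locally cached (delegated) lock to the server when the delegation
 * is being recalled.
 */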
6097 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6098 {
6099 struct nfs_server *server = NFS_SERVER(state->inode);
6100 int err;
6101
6102 err = nfs4_set_lock_state(state, fl);
6103 if (err != 0)
6104 return err;
6105 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6106 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6107 }
6108
6109 struct nfs_release_lockowner_data {
6110 struct nfs4_lock_state *lsp;
6111 struct nfs_server *server;
6112 struct nfs_release_lockowner_args args;
6113 struct nfs_release_lockowner_res res;
6114 unsigned long timestamp;
6115 };
6116
6117 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6118 {
6119 struct nfs_release_lockowner_data *data = calldata;
6120 struct nfs_server *server = data->server;
6121 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6122 &data->args.seq_args, &data->res.seq_res, task);
6123 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6124 data->timestamp = jiffies;
6125 }
6126
6127 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6128 {
6129 struct nfs_release_lockowner_data *data = calldata;
6130 struct nfs_server *server = data->server;
6131
6132 nfs40_sequence_done(task, &data->res.seq_res);
6133
6134 switch (task->tk_status) {
6135 case 0:
6136 renew_lease(server, data->timestamp);
6137 break;
6138 case -NFS4ERR_STALE_CLIENTID:
6139 case -NFS4ERR_EXPIRED:
6140 nfs4_schedule_lease_recovery(server->nfs_client);
6141 break;
6142 case -NFS4ERR_LEASE_MOVED:
6143 case -NFS4ERR_DELAY:
6144 if (nfs4_async_handle_error(task, server,
6145 NULL, NULL) == -EAGAIN)
6146 rpc_restart_call_prepare(task);
6147 }
6148 }
6149
6150 static void nfs4_release_lockowner_release(void *calldata)
6151 {
6152 struct nfs_release_lockowner_data *data = calldata;
6153 nfs4_free_lock_state(data->server, data->lsp);
6154 kfree(calldata);
6155 }
6156
6157 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6158 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6159 .rpc_call_done = nfs4_release_lockowner_done,
6160 .rpc_release = nfs4_release_lockowner_release,
6161 };
6162
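/*
 * Issue an asynchronous RELEASE_LOCKOWNER. The operation only exists in
 * NFSv4.0, so nothing is sent for later minor versions; when it is sent,
 * the lock state is freed once the RPC call data is released.
 */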
6163 static void
6164 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6165 {
6166 struct nfs_release_lockowner_data *data;
6167 struct rpc_message msg = {
6168 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6169 };
6170
6171 if (server->nfs_client->cl_mvops->minor_version != 0)
6172 return;
6173
6174 data = kmalloc(sizeof(*data), GFP_NOFS);
6175 if (!data)
6176 return;
6177 data->lsp = lsp;
6178 data->server = server;
6179 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6180 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6181 data->args.lock_owner.s_dev = server->s_dev;
6182
6183 msg.rpc_argp = &data->args;
6184 msg.rpc_resp = &data->res;
6185 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6186 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6187 }
6188
6189 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6190
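/*
 * xattr handlers exposing the NFSv4 ACL as the "system.nfs4_acl" extended
 * attribute.
 */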
6191 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
6192 const void *buf, size_t buflen,
6193 int flags, int type)
6194 {
6195 if (strcmp(key, "") != 0)
6196 return -EINVAL;
6197
6198 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6199 }
6200
6201 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
6202 void *buf, size_t buflen, int type)
6203 {
6204 if (strcmp(key, "") != 0)
6205 return -EINVAL;
6206
6207 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6208 }
6209
6210 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
6211 size_t list_len, const char *name,
6212 size_t name_len, int type)
6213 {
6214 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6215
6216 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6217 return 0;
6218
6219 if (list && len <= list_len)
6220 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6221 return len;
6222 }
6223
6224 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6225 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6226 {
6227 return server->caps & NFS_CAP_SECURITY_LABEL;
6228 }
6229
6230 static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key,
6231 const void *buf, size_t buflen,
6232 int flags, int type)
6233 {
6234 if (security_ismaclabel(key))
6235 return nfs4_set_security_label(dentry, buf, buflen);
6236
6237 return -EOPNOTSUPP;
6238 }
6239
6240 static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
6241 void *buf, size_t buflen, int type)
6242 {
6243 if (security_ismaclabel(key))
6244 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6245 return -EOPNOTSUPP;
6246 }
6247
6248 static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
6249 size_t list_len, const char *name,
6250 size_t name_len, int type)
6251 {
6252 size_t len = 0;
6253
6254 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6255 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6256 if (list && len <= list_len)
6257 security_inode_listsecurity(d_inode(dentry), list, len);
6258 }
6259 return len;
6260 }
6261
6262 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6263 .prefix = XATTR_SECURITY_PREFIX,
6264 .list = nfs4_xattr_list_nfs4_label,
6265 .get = nfs4_xattr_get_nfs4_label,
6266 .set = nfs4_xattr_set_nfs4_label,
6267 };
6268 #endif
6269
6270
6271 /*
6272 * nfs_fhget will use either the mounted_on_fileid or the fileid
6273 */
6274 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6275 {
6276 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6277 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6278 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6279 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6280 return;
6281
6282 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6283 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6284 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6285 fattr->nlink = 2;
6286 }
6287
6288 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6289 const struct qstr *name,
6290 struct nfs4_fs_locations *fs_locations,
6291 struct page *page)
6292 {
6293 struct nfs_server *server = NFS_SERVER(dir);
6294 u32 bitmask[3] = {
6295 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6296 };
6297 struct nfs4_fs_locations_arg args = {
6298 .dir_fh = NFS_FH(dir),
6299 .name = name,
6300 .page = page,
6301 .bitmask = bitmask,
6302 };
6303 struct nfs4_fs_locations_res res = {
6304 .fs_locations = fs_locations,
6305 };
6306 struct rpc_message msg = {
6307 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6308 .rpc_argp = &args,
6309 .rpc_resp = &res,
6310 };
6311 int status;
6312
6313 dprintk("%s: start\n", __func__);
6314
6315 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6316 * is not supported */
6317 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6318 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6319 else
6320 bitmask[0] |= FATTR4_WORD0_FILEID;
6321
6322 nfs_fattr_init(&fs_locations->fattr);
6323 fs_locations->server = server;
6324 fs_locations->nlocations = 0;
6325 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6326 dprintk("%s: returned status = %d\n", __func__, status);
6327 return status;
6328 }
6329
6330 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6331 const struct qstr *name,
6332 struct nfs4_fs_locations *fs_locations,
6333 struct page *page)
6334 {
6335 struct nfs4_exception exception = { };
6336 int err;
6337 do {
6338 err = _nfs4_proc_fs_locations(client, dir, name,
6339 fs_locations, page);
6340 trace_nfs4_get_fs_locations(dir, name, err);
6341 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6342 &exception);
6343 } while (exception.retry);
6344 return err;
6345 }
6346
6347 /*
6348 * This operation also signals the server that this client is
6349 * performing migration recovery. The server can stop returning
6350 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6351 * appended to this compound to identify the client ID which is
6352 * performing recovery.
6353 */
6354 static int _nfs40_proc_get_locations(struct inode *inode,
6355 struct nfs4_fs_locations *locations,
6356 struct page *page, struct rpc_cred *cred)
6357 {
6358 struct nfs_server *server = NFS_SERVER(inode);
6359 struct rpc_clnt *clnt = server->client;
6360 u32 bitmask[2] = {
6361 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6362 };
6363 struct nfs4_fs_locations_arg args = {
6364 .clientid = server->nfs_client->cl_clientid,
6365 .fh = NFS_FH(inode),
6366 .page = page,
6367 .bitmask = bitmask,
6368 .migration = 1, /* skip LOOKUP */
6369 .renew = 1, /* append RENEW */
6370 };
6371 struct nfs4_fs_locations_res res = {
6372 .fs_locations = locations,
6373 .migration = 1,
6374 .renew = 1,
6375 };
6376 struct rpc_message msg = {
6377 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6378 .rpc_argp = &args,
6379 .rpc_resp = &res,
6380 .rpc_cred = cred,
6381 };
6382 unsigned long now = jiffies;
6383 int status;
6384
6385 nfs_fattr_init(&locations->fattr);
6386 locations->server = server;
6387 locations->nlocations = 0;
6388
6389 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6390 nfs4_set_sequence_privileged(&args.seq_args);
6391 status = nfs4_call_sync_sequence(clnt, server, &msg,
6392 &args.seq_args, &res.seq_res);
6393 if (status)
6394 return status;
6395
6396 renew_lease(server, now);
6397 return 0;
6398 }
6399
6400 #ifdef CONFIG_NFS_V4_1
6401
6402 /*
6403 * This operation also signals the server that this client is
6404 * performing migration recovery. The server can stop asserting
6405 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6406 * performing this operation is identified in the SEQUENCE
6407 * operation in this compound.
6408 *
6409 * When the client supports GETATTR(fs_locations_info), it can
6410 * be plumbed in here.
6411 */
6412 static int _nfs41_proc_get_locations(struct inode *inode,
6413 struct nfs4_fs_locations *locations,
6414 struct page *page, struct rpc_cred *cred)
6415 {
6416 struct nfs_server *server = NFS_SERVER(inode);
6417 struct rpc_clnt *clnt = server->client;
6418 u32 bitmask[2] = {
6419 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6420 };
6421 struct nfs4_fs_locations_arg args = {
6422 .fh = NFS_FH(inode),
6423 .page = page,
6424 .bitmask = bitmask,
6425 .migration = 1, /* skip LOOKUP */
6426 };
6427 struct nfs4_fs_locations_res res = {
6428 .fs_locations = locations,
6429 .migration = 1,
6430 };
6431 struct rpc_message msg = {
6432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6433 .rpc_argp = &args,
6434 .rpc_resp = &res,
6435 .rpc_cred = cred,
6436 };
6437 int status;
6438
6439 nfs_fattr_init(&locations->fattr);
6440 locations->server = server;
6441 locations->nlocations = 0;
6442
6443 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6444 nfs4_set_sequence_privileged(&args.seq_args);
6445 status = nfs4_call_sync_sequence(clnt, server, &msg,
6446 &args.seq_args, &res.seq_res);
6447 if (status == NFS4_OK &&
6448 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6449 status = -NFS4ERR_LEASE_MOVED;
6450 return status;
6451 }
6452
6453 #endif /* CONFIG_NFS_V4_1 */
6454
6455 /**
6456 * nfs4_proc_get_locations - discover locations for a migrated FSID
6457 * @inode: inode on FSID that is migrating
6458 * @locations: result of query
6459 * @page: buffer
6460 * @cred: credential to use for this operation
6461 *
6462 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6463 * operation failed, or a negative errno if a local error occurred.
6464 *
6465 * On success, "locations" is filled in, but if the server has
6466 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6467 * asserted.
6468 *
6469 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6470 * from this client that require migration recovery.
6471 */
6472 int nfs4_proc_get_locations(struct inode *inode,
6473 struct nfs4_fs_locations *locations,
6474 struct page *page, struct rpc_cred *cred)
6475 {
6476 struct nfs_server *server = NFS_SERVER(inode);
6477 struct nfs_client *clp = server->nfs_client;
6478 const struct nfs4_mig_recovery_ops *ops =
6479 clp->cl_mvops->mig_recovery_ops;
6480 struct nfs4_exception exception = { };
6481 int status;
6482
6483 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6484 (unsigned long long)server->fsid.major,
6485 (unsigned long long)server->fsid.minor,
6486 clp->cl_hostname);
6487 nfs_display_fhandle(NFS_FH(inode), __func__);
6488
6489 do {
6490 status = ops->get_locations(inode, locations, page, cred);
6491 if (status != -NFS4ERR_DELAY)
6492 break;
6493 nfs4_handle_exception(server, status, &exception);
6494 } while (exception.retry);
6495 return status;
6496 }
6497
6498 /*
6499 * This operation also signals the server that this client is
6500 * performing "lease moved" recovery. The server can stop
6501 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6502 * is appended to this compound to identify the client ID which is
6503 * performing recovery.
6504 */
6505 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6506 {
6507 struct nfs_server *server = NFS_SERVER(inode);
6508 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6509 struct rpc_clnt *clnt = server->client;
6510 struct nfs4_fsid_present_arg args = {
6511 .fh = NFS_FH(inode),
6512 .clientid = clp->cl_clientid,
6513 .renew = 1, /* append RENEW */
6514 };
6515 struct nfs4_fsid_present_res res = {
6516 .renew = 1,
6517 };
6518 struct rpc_message msg = {
6519 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6520 .rpc_argp = &args,
6521 .rpc_resp = &res,
6522 .rpc_cred = cred,
6523 };
6524 unsigned long now = jiffies;
6525 int status;
6526
6527 res.fh = nfs_alloc_fhandle();
6528 if (res.fh == NULL)
6529 return -ENOMEM;
6530
6531 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6532 nfs4_set_sequence_privileged(&args.seq_args);
6533 status = nfs4_call_sync_sequence(clnt, server, &msg,
6534 &args.seq_args, &res.seq_res);
6535 nfs_free_fhandle(res.fh);
6536 if (status)
6537 return status;
6538
6539 do_renew_lease(clp, now);
6540 return 0;
6541 }
6542
6543 #ifdef CONFIG_NFS_V4_1
6544
6545 /*
6546 * This operation also signals the server that this client is
6547 * performing "lease moved" recovery. The server can stop asserting
6548 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6549 * this operation is identified in the SEQUENCE operation in this
6550 * compound.
6551 */
6552 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6553 {
6554 struct nfs_server *server = NFS_SERVER(inode);
6555 struct rpc_clnt *clnt = server->client;
6556 struct nfs4_fsid_present_arg args = {
6557 .fh = NFS_FH(inode),
6558 };
6559 struct nfs4_fsid_present_res res = {
6560 };
6561 struct rpc_message msg = {
6562 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6563 .rpc_argp = &args,
6564 .rpc_resp = &res,
6565 .rpc_cred = cred,
6566 };
6567 int status;
6568
6569 res.fh = nfs_alloc_fhandle();
6570 if (res.fh == NULL)
6571 return -ENOMEM;
6572
6573 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6574 nfs4_set_sequence_privileged(&args.seq_args);
6575 status = nfs4_call_sync_sequence(clnt, server, &msg,
6576 &args.seq_args, &res.seq_res);
6577 nfs_free_fhandle(res.fh);
6578 if (status == NFS4_OK &&
6579 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6580 status = -NFS4ERR_LEASE_MOVED;
6581 return status;
6582 }
6583
6584 #endif /* CONFIG_NFS_V4_1 */
6585
6586 /**
6587 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6588 * @inode: inode on FSID to check
6589 * @cred: credential to use for this operation
6590 *
6591 * Server indicates whether the FSID is present, moved, or not
6592 * recognized. This operation is necessary to clear a LEASE_MOVED
6593 * condition for this client ID.
6594 *
6595 * Returns NFS4_OK if the FSID is present on this server,
6596 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6597 * NFS4ERR code if some error occurred on the server, or a
6598 * negative errno if a local failure occurred.
6599 */
6600 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6601 {
6602 struct nfs_server *server = NFS_SERVER(inode);
6603 struct nfs_client *clp = server->nfs_client;
6604 const struct nfs4_mig_recovery_ops *ops =
6605 clp->cl_mvops->mig_recovery_ops;
6606 struct nfs4_exception exception = { };
6607 int status;
6608
6609 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6610 (unsigned long long)server->fsid.major,
6611 (unsigned long long)server->fsid.minor,
6612 clp->cl_hostname);
6613 nfs_display_fhandle(NFS_FH(inode), __func__);
6614
6615 do {
6616 status = ops->fsid_present(inode, cred);
6617 if (status != -NFS4ERR_DELAY)
6618 break;
6619 nfs4_handle_exception(server, status, &exception);
6620 } while (exception.retry);
6621 return status;
6622 }
6623
6624 /**
6625 * If 'use_integrity' is true and the state management nfs_client
6626 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6627 * and the machine credential as per RFC3530bis and RFC5661 Security
6628 * Considerations sections. Otherwise, just use the user cred with the
6629 * filesystem's rpc_client.
6630 */
6631 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6632 {
6633 int status;
6634 struct nfs4_secinfo_arg args = {
6635 .dir_fh = NFS_FH(dir),
6636 .name = name,
6637 };
6638 struct nfs4_secinfo_res res = {
6639 .flavors = flavors,
6640 };
6641 struct rpc_message msg = {
6642 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6643 .rpc_argp = &args,
6644 .rpc_resp = &res,
6645 };
6646 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6647 struct rpc_cred *cred = NULL;
6648
6649 if (use_integrity) {
6650 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6651 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6652 msg.rpc_cred = cred;
6653 }
6654
6655 dprintk("NFS call secinfo %s\n", name->name);
6656
6657 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6658 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6659
6660 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6661 &res.seq_res, 0);
6662 dprintk("NFS reply secinfo: %d\n", status);
6663
6664 if (cred)
6665 put_rpccred(cred);
6666
6667 return status;
6668 }
6669
6670 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6671 struct nfs4_secinfo_flavors *flavors)
6672 {
6673 struct nfs4_exception exception = { };
6674 int err;
6675 do {
6676 err = -NFS4ERR_WRONGSEC;
6677
6678 /* try to use integrity protection with machine cred */
6679 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6680 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6681
6682 /*
6683 * if unable to use integrity protection, or SECINFO with
6684 * integrity protection returns NFS4ERR_WRONGSEC (which is
6685 * disallowed by spec, but exists in deployed servers) use
6686 * the current filesystem's rpc_client and the user cred.
6687 */
6688 if (err == -NFS4ERR_WRONGSEC)
6689 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6690
6691 trace_nfs4_secinfo(dir, name, err);
6692 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6693 &exception);
6694 } while (exception.retry);
6695 return err;
6696 }
6697
6698 #ifdef CONFIG_NFS_V4_1
6699 /*
6700 * Check the exchange flags returned by the server for invalid flags, having
6701 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
6702 * DS flags set.
6703 */
6704 static int nfs4_check_cl_exchange_flags(u32 flags)
6705 {
6706 if (flags & ~EXCHGID4_FLAG_MASK_R)
6707 goto out_inval;
6708 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6709 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6710 goto out_inval;
6711 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6712 goto out_inval;
6713 return NFS_OK;
6714 out_inval:
6715 return -NFS4ERR_INVAL;
6716 }
6717
6718 static bool
6719 nfs41_same_server_scope(struct nfs41_server_scope *a,
6720 struct nfs41_server_scope *b)
6721 {
6722 if (a->server_scope_sz == b->server_scope_sz &&
6723 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6724 return true;
6725
6726 return false;
6727 }
6728
6729 /*
6730 * nfs4_proc_bind_conn_to_session()
6731 *
6732 * The 4.1 client currently uses the same TCP connection for the
6733 * fore and backchannel.
6734 */
6735 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6736 {
6737 int status;
6738 struct nfs41_bind_conn_to_session_args args = {
6739 .client = clp,
6740 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6741 };
6742 struct nfs41_bind_conn_to_session_res res;
6743 struct rpc_message msg = {
6744 .rpc_proc =
6745 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6746 .rpc_argp = &args,
6747 .rpc_resp = &res,
6748 .rpc_cred = cred,
6749 };
6750
6751 dprintk("--> %s\n", __func__);
6752
6753 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6754 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6755 args.dir = NFS4_CDFC4_FORE;
6756
6757 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6758 trace_nfs4_bind_conn_to_session(clp, status);
6759 if (status == 0) {
6760 if (memcmp(res.sessionid.data,
6761 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6762 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6763 status = -EIO;
6764 goto out;
6765 }
6766 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6767 dprintk("NFS: %s: Unexpected direction from server\n",
6768 __func__);
6769 status = -EIO;
6770 goto out;
6771 }
6772 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6773 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6774 __func__);
6775 status = -EIO;
6776 goto out;
6777 }
6778 }
6779 out:
6780 dprintk("<-- %s status= %d\n", __func__, status);
6781 return status;
6782 }
6783
6784 /*
6785 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
6786 * and, in the allow map, the operations we would like in order to enable certain features
6787 */
6788 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6789 .how = SP4_MACH_CRED,
6790 .enforce.u.words = {
6791 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6792 1 << (OP_EXCHANGE_ID - 32) |
6793 1 << (OP_CREATE_SESSION - 32) |
6794 1 << (OP_DESTROY_SESSION - 32) |
6795 1 << (OP_DESTROY_CLIENTID - 32)
6796 },
6797 .allow.u.words = {
6798 [0] = 1 << (OP_CLOSE) |
6799 1 << (OP_LOCKU) |
6800 1 << (OP_COMMIT),
6801 [1] = 1 << (OP_SECINFO - 32) |
6802 1 << (OP_SECINFO_NO_NAME - 32) |
6803 1 << (OP_TEST_STATEID - 32) |
6804 1 << (OP_FREE_STATEID - 32) |
6805 1 << (OP_WRITE - 32)
6806 }
6807 };
6808
6809 /*
6810 * Select the state protection mode for client `clp' given the server results
6811 * from exchange_id in `sp'.
6812 *
6813 * Returns 0 on success, negative errno otherwise.
6814 */
6815 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6816 struct nfs41_state_protection *sp)
6817 {
6818 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6819 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6820 1 << (OP_EXCHANGE_ID - 32) |
6821 1 << (OP_CREATE_SESSION - 32) |
6822 1 << (OP_DESTROY_SESSION - 32) |
6823 1 << (OP_DESTROY_CLIENTID - 32)
6824 };
6825 unsigned int i;
6826
6827 if (sp->how == SP4_MACH_CRED) {
6828 /* Print state protect result */
6829 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6830 for (i = 0; i <= LAST_NFS4_OP; i++) {
6831 if (test_bit(i, sp->enforce.u.longs))
6832 dfprintk(MOUNT, " enforce op %d\n", i);
6833 if (test_bit(i, sp->allow.u.longs))
6834 dfprintk(MOUNT, " allow op %d\n", i);
6835 }
6836
6837 /* make sure nothing is on enforce list that isn't supported */
6838 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6839 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6840 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6841 return -EINVAL;
6842 }
6843 }
6844
6845 /*
6846 * Minimal mode - state operations are allowed to use machine
6847 * credential. Note this already happens by default, so the
6848 * client doesn't have to do anything more than the negotiation.
6849 *
6850 * NOTE: we don't care if EXCHANGE_ID is in the list -
6851 * we're already using the machine cred for exchange_id
6852 * and will never use a different cred.
6853 */
6854 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6855 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6856 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6857 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6858 dfprintk(MOUNT, "sp4_mach_cred:\n");
6859 dfprintk(MOUNT, " minimal mode enabled\n");
6860 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6861 } else {
6862 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6863 return -EINVAL;
6864 }
6865
6866 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6867 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6868 dfprintk(MOUNT, " cleanup mode enabled\n");
6869 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6870 }
6871
6872 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6873 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6874 dfprintk(MOUNT, " secinfo mode enabled\n");
6875 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6876 }
6877
6878 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6879 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6880 dfprintk(MOUNT, " stateid mode enabled\n");
6881 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6882 }
6883
6884 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6885 dfprintk(MOUNT, " write mode enabled\n");
6886 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6887 }
6888
6889 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6890 dfprintk(MOUNT, " commit mode enabled\n");
6891 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6892 }
6893 }
6894
6895 return 0;
6896 }
6897
6898 /*
6899 * _nfs4_proc_exchange_id()
6900 *
6901 * Wrapper for EXCHANGE_ID operation.
6902 */
6903 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6904 u32 sp4_how)
6905 {
6906 nfs4_verifier verifier;
6907 struct nfs41_exchange_id_args args = {
6908 .verifier = &verifier,
6909 .client = clp,
6910 #ifdef CONFIG_NFS_V4_1_MIGRATION
6911 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6912 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6913 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6914 #else
6915 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6916 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6917 #endif
6918 };
6919 struct nfs41_exchange_id_res res = {
6920 0
6921 };
6922 int status;
6923 struct rpc_message msg = {
6924 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6925 .rpc_argp = &args,
6926 .rpc_resp = &res,
6927 .rpc_cred = cred,
6928 };
6929
6930 nfs4_init_boot_verifier(clp, &verifier);
6931
6932 status = nfs4_init_uniform_client_string(clp);
6933 if (status)
6934 goto out;
6935
6936 dprintk("NFS call exchange_id auth=%s, '%s'\n",
6937 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6938 clp->cl_owner_id);
6939
6940 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
6941 GFP_NOFS);
6942 if (unlikely(res.server_owner == NULL)) {
6943 status = -ENOMEM;
6944 goto out;
6945 }
6946
6947 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
6948 GFP_NOFS);
6949 if (unlikely(res.server_scope == NULL)) {
6950 status = -ENOMEM;
6951 goto out_server_owner;
6952 }
6953
6954 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
6955 if (unlikely(res.impl_id == NULL)) {
6956 status = -ENOMEM;
6957 goto out_server_scope;
6958 }
6959
6960 switch (sp4_how) {
6961 case SP4_NONE:
6962 args.state_protect.how = SP4_NONE;
6963 break;
6964
6965 case SP4_MACH_CRED:
6966 args.state_protect = nfs4_sp4_mach_cred_request;
6967 break;
6968
6969 default:
6970 /* unsupported! */
6971 WARN_ON_ONCE(1);
6972 status = -EINVAL;
6973 goto out_impl_id;
6974 }
6975
6976 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6977 trace_nfs4_exchange_id(clp, status);
6978 if (status == 0)
6979 status = nfs4_check_cl_exchange_flags(res.flags);
6980
6981 if (status == 0)
6982 status = nfs4_sp4_select_mode(clp, &res.state_protect);
6983
6984 if (status == 0) {
6985 clp->cl_clientid = res.clientid;
6986 clp->cl_exchange_flags = res.flags;
6987 /* Client ID is not confirmed */
6988 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
6989 clear_bit(NFS4_SESSION_ESTABLISHED,
6990 &clp->cl_session->session_state);
6991 clp->cl_seqid = res.seqid;
6992 }
6993
6994 kfree(clp->cl_serverowner);
6995 clp->cl_serverowner = res.server_owner;
6996 res.server_owner = NULL;
6997
6998 /* use the most recent implementation id */
6999 kfree(clp->cl_implid);
7000 clp->cl_implid = res.impl_id;
7001 res.impl_id = NULL;
7002
7003 if (clp->cl_serverscope != NULL &&
7004 !nfs41_same_server_scope(clp->cl_serverscope,
7005 res.server_scope)) {
7006 dprintk("%s: server_scope mismatch detected\n",
7007 __func__);
7008 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7009 kfree(clp->cl_serverscope);
7010 clp->cl_serverscope = NULL;
7011 }
7012
7013 if (clp->cl_serverscope == NULL) {
7014 clp->cl_serverscope = res.server_scope;
7015 res.server_scope = NULL;
7016 }
7017 }
7018
7019 out_impl_id:
7020 kfree(res.impl_id);
7021 out_server_scope:
7022 kfree(res.server_scope);
7023 out_server_owner:
7024 kfree(res.server_owner);
7025 out:
7026 if (clp->cl_implid != NULL)
7027 dprintk("NFS reply exchange_id: Server Implementation ID: "
7028 "domain: %s, name: %s, date: %llu,%u\n",
7029 clp->cl_implid->domain, clp->cl_implid->name,
7030 clp->cl_implid->date.seconds,
7031 clp->cl_implid->date.nseconds);
7032 dprintk("NFS reply exchange_id: %d\n", status);
7033 return status;
7034 }
7035
7036 /*
7037 * nfs4_proc_exchange_id()
7038 *
7039 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7040 *
7041 * Since the clientid has expired, all compounds using sessions
7042 * associated with the stale clientid will be returning
7043 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7044 * be in some phase of session reset.
7045 *
7046 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7047 */
7048 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7049 {
7050 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7051 int status;
7052
7053 /* try SP4_MACH_CRED if krb5i/p */
7054 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7055 authflavor == RPC_AUTH_GSS_KRB5P) {
7056 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7057 if (!status)
7058 return 0;
7059 }
7060
7061 /* try SP4_NONE */
7062 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7063 }
7064
7065 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7066 struct rpc_cred *cred)
7067 {
7068 struct rpc_message msg = {
7069 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7070 .rpc_argp = clp,
7071 .rpc_cred = cred,
7072 };
7073 int status;
7074
7075 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7076 trace_nfs4_destroy_clientid(clp, status);
7077 if (status)
7078 dprintk("NFS: Got error %d from the server %s on "
7079 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7080 return status;
7081 }
7082
7083 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7084 struct rpc_cred *cred)
7085 {
7086 unsigned int loop;
7087 int ret;
7088
7089 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7090 ret = _nfs4_proc_destroy_clientid(clp, cred);
7091 switch (ret) {
7092 case -NFS4ERR_DELAY:
7093 case -NFS4ERR_CLIENTID_BUSY:
7094 ssleep(1);
7095 break;
7096 default:
7097 return ret;
7098 }
7099 }
7100 return 0;
7101 }
7102
7103 int nfs4_destroy_clientid(struct nfs_client *clp)
7104 {
7105 struct rpc_cred *cred;
7106 int ret = 0;
7107
7108 if (clp->cl_mvops->minor_version < 1)
7109 goto out;
7110 if (clp->cl_exchange_flags == 0)
7111 goto out;
7112 if (clp->cl_preserve_clid)
7113 goto out;
7114 cred = nfs4_get_clid_cred(clp);
7115 ret = nfs4_proc_destroy_clientid(clp, cred);
7116 if (cred)
7117 put_rpccred(cred);
7118 switch (ret) {
7119 case 0:
7120 case -NFS4ERR_STALE_CLIENTID:
7121 clp->cl_exchange_flags = 0;
7122 }
7123 out:
7124 return ret;
7125 }
7126
7127 struct nfs4_get_lease_time_data {
7128 struct nfs4_get_lease_time_args *args;
7129 struct nfs4_get_lease_time_res *res;
7130 struct nfs_client *clp;
7131 };
7132
7133 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7134 void *calldata)
7135 {
7136 struct nfs4_get_lease_time_data *data =
7137 (struct nfs4_get_lease_time_data *)calldata;
7138
7139 dprintk("--> %s\n", __func__);
7140 /* just set up the sequence; do not trigger session recovery
7141 since we're invoked within one */
7142 nfs41_setup_sequence(data->clp->cl_session,
7143 &data->args->la_seq_args,
7144 &data->res->lr_seq_res,
7145 task);
7146 dprintk("<-- %s\n", __func__);
7147 }
7148
7149 /*
7150 * Called from nfs4_state_manager thread for session setup, so don't recover
7151 * from sequence operation or clientid errors.
7152 */
7153 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7154 {
7155 struct nfs4_get_lease_time_data *data =
7156 (struct nfs4_get_lease_time_data *)calldata;
7157
7158 dprintk("--> %s\n", __func__);
7159 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7160 return;
7161 switch (task->tk_status) {
7162 case -NFS4ERR_DELAY:
7163 case -NFS4ERR_GRACE:
7164 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7165 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7166 task->tk_status = 0;
7167 /* fall through */
7168 case -NFS4ERR_RETRY_UNCACHED_REP:
7169 rpc_restart_call_prepare(task);
7170 return;
7171 }
7172 dprintk("<-- %s\n", __func__);
7173 }
7174
7175 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7176 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7177 .rpc_call_done = nfs4_get_lease_time_done,
7178 };
7179
7180 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7181 {
7182 struct rpc_task *task;
7183 struct nfs4_get_lease_time_args args;
7184 struct nfs4_get_lease_time_res res = {
7185 .lr_fsinfo = fsinfo,
7186 };
7187 struct nfs4_get_lease_time_data data = {
7188 .args = &args,
7189 .res = &res,
7190 .clp = clp,
7191 };
7192 struct rpc_message msg = {
7193 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7194 .rpc_argp = &args,
7195 .rpc_resp = &res,
7196 };
7197 struct rpc_task_setup task_setup = {
7198 .rpc_client = clp->cl_rpcclient,
7199 .rpc_message = &msg,
7200 .callback_ops = &nfs4_get_lease_time_ops,
7201 .callback_data = &data,
7202 .flags = RPC_TASK_TIMEOUT,
7203 };
7204 int status;
7205
7206 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7207 nfs4_set_sequence_privileged(&args.la_seq_args);
7208 dprintk("--> %s\n", __func__);
7209 task = rpc_run_task(&task_setup);
7210
7211 if (IS_ERR(task))
7212 status = PTR_ERR(task);
7213 else {
7214 status = task->tk_status;
7215 rpc_put_task(task);
7216 }
7217 dprintk("<-- %s return %d\n", __func__, status);
7218
7219 return status;
7220 }
7221
7222 /*
7223 * Initialize the values to be used by the client in CREATE_SESSION.
7224 * If nfs4_init_session set the fore channel request and response sizes,
7225 * use them.
7226 *
7227 * Set the back channel max_resp_sz_cached to zero to force the client to
7228 * always set csa_cachethis to FALSE because the current implementation
7229 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7230 */
7231 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7232 {
7233 unsigned int max_rqst_sz, max_resp_sz;
7234
7235 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7236 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7237
7238 /* Fore channel attributes */
7239 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7240 args->fc_attrs.max_resp_sz = max_resp_sz;
7241 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7242 args->fc_attrs.max_reqs = max_session_slots;
7243
7244 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7245 "max_ops=%u max_reqs=%u\n",
7246 __func__,
7247 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7248 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7249
7250 /* Back channel attributes */
7251 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7252 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7253 args->bc_attrs.max_resp_sz_cached = 0;
7254 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7255 args->bc_attrs.max_reqs = 1;
7256
7257 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7258 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7259 __func__,
7260 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7261 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7262 args->bc_attrs.max_reqs);
7263 }
7264
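/*
 * Sanity-check the fore channel attributes returned by the server: the
 * response size must not exceed what we offered, max_ops must not drop
 * below our minimum, and the slot count is clamped to NFS4_MAX_SLOT_TABLE.
 */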
7265 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7266 struct nfs41_create_session_res *res)
7267 {
7268 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7269 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7270
7271 if (rcvd->max_resp_sz > sent->max_resp_sz)
7272 return -EINVAL;
7273 /*
7274 * Our requested max_ops is the minimum we need; we're not
7275 * prepared to break up compounds into smaller pieces than that.
7276 * So, no point even trying to continue if the server won't
7277 * cooperate:
7278 */
7279 if (rcvd->max_ops < sent->max_ops)
7280 return -EINVAL;
7281 if (rcvd->max_reqs == 0)
7282 return -EINVAL;
7283 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7284 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7285 return 0;
7286 }
7287
7288 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7289 struct nfs41_create_session_res *res)
7290 {
7291 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7292 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7293
7294 if (!(res->flags & SESSION4_BACK_CHAN))
7295 goto out;
7296 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7297 return -EINVAL;
7298 if (rcvd->max_resp_sz < sent->max_resp_sz)
7299 return -EINVAL;
7300 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7301 return -EINVAL;
7302 /* These would render the backchannel useless: */
7303 if (rcvd->max_ops != sent->max_ops)
7304 return -EINVAL;
7305 if (rcvd->max_reqs != sent->max_reqs)
7306 return -EINVAL;
7307 out:
7308 return 0;
7309 }
7310
7311 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7312 struct nfs41_create_session_res *res)
7313 {
7314 int ret;
7315
7316 ret = nfs4_verify_fore_channel_attrs(args, res);
7317 if (ret)
7318 return ret;
7319 return nfs4_verify_back_channel_attrs(args, res);
7320 }
7321
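/*
 * Record the negotiated session parameters and mark both the client ID and
 * the session as confirmed.
 */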
7322 static void nfs4_update_session(struct nfs4_session *session,
7323 struct nfs41_create_session_res *res)
7324 {
7325 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7326 /* Mark client id and session as being confirmed */
7327 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7328 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7329 session->flags = res->flags;
7330 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7331 if (res->flags & SESSION4_BACK_CHAN)
7332 memcpy(&session->bc_attrs, &res->bc_attrs,
7333 sizeof(session->bc_attrs));
7334 }
7335
7336 static int _nfs4_proc_create_session(struct nfs_client *clp,
7337 struct rpc_cred *cred)
7338 {
7339 struct nfs4_session *session = clp->cl_session;
7340 struct nfs41_create_session_args args = {
7341 .client = clp,
7342 .clientid = clp->cl_clientid,
7343 .seqid = clp->cl_seqid,
7344 .cb_program = NFS4_CALLBACK,
7345 };
7346 struct nfs41_create_session_res res;
7347
7348 struct rpc_message msg = {
7349 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7350 .rpc_argp = &args,
7351 .rpc_resp = &res,
7352 .rpc_cred = cred,
7353 };
7354 int status;
7355
7356 nfs4_init_channel_attrs(&args);
7357 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7358
7359 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7360 trace_nfs4_create_session(clp, status);
7361
7362 if (!status) {
7363 /* Verify the session's negotiated channel_attrs values */
7364 status = nfs4_verify_channel_attrs(&args, &res);
7365 /* Increment the clientid slot sequence id */
7366 if (clp->cl_seqid == res.seqid)
7367 clp->cl_seqid++;
7368 if (status)
7369 goto out;
7370 nfs4_update_session(session, &res);
7371 }
7372 out:
7373 return status;
7374 }
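/*
 * Note that clp->cl_seqid is only bumped when the reply echoes the
 * seqid we sent, keeping the client's CREATE_SESSION sequence id in
 * step with what the server has recorded.
 */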
7375
7376 /*
7377 * Issues a CREATE_SESSION operation to the server.
7378 * It is the responsibility of the caller to verify the session is
7379 * expired before calling this routine.
7380 */
7381 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7382 {
7383 int status;
7384 unsigned *ptr;
7385 struct nfs4_session *session = clp->cl_session;
7386
7387 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7388
7389 status = _nfs4_proc_create_session(clp, cred);
7390 if (status)
7391 goto out;
7392
7393 /* Init or reset the session slot tables */
7394 status = nfs4_setup_session_slot_tables(session);
7395 dprintk("slot table setup returned %d\n", status);
7396 if (status)
7397 goto out;
7398
7399 ptr = (unsigned *)&session->sess_id.data[0];
7400 	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
7401 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7402 out:
7403 dprintk("<-- %s\n", __func__);
7404 return status;
7405 }
7406
7407 /*
7408 * Issue the over-the-wire RPC DESTROY_SESSION.
7409 * The caller must serialize access to this routine.
7410 */
7411 int nfs4_proc_destroy_session(struct nfs4_session *session,
7412 struct rpc_cred *cred)
7413 {
7414 struct rpc_message msg = {
7415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7416 .rpc_argp = session,
7417 .rpc_cred = cred,
7418 };
7419 int status = 0;
7420
7421 dprintk("--> nfs4_proc_destroy_session\n");
7422
7423 	/* session is still being set up */
7424 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7425 return 0;
7426
7427 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7428 trace_nfs4_destroy_session(session->clp, status);
7429
7430 if (status)
7431 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7432 "Session has been destroyed regardless...\n", status);
7433
7434 dprintk("<-- nfs4_proc_destroy_session\n");
7435 return status;
7436 }
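/*
 * The test_and_clear_bit() above makes DESTROY_SESSION a no-op for a
 * session that was never fully established: NFS4_SESSION_ESTABLISHED
 * is only set by nfs4_update_session() after CREATE_SESSION succeeds,
 * so no RPC is sent for a half-constructed session.
 */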
7437
7438 /*
7439 * Renew the cl_session lease.
7440 */
7441 struct nfs4_sequence_data {
7442 struct nfs_client *clp;
7443 struct nfs4_sequence_args args;
7444 struct nfs4_sequence_res res;
7445 };
7446
7447 static void nfs41_sequence_release(void *data)
7448 {
7449 struct nfs4_sequence_data *calldata = data;
7450 struct nfs_client *clp = calldata->clp;
7451
7452 if (atomic_read(&clp->cl_count) > 1)
7453 nfs4_schedule_state_renewal(clp);
7454 nfs_put_client(clp);
7455 kfree(calldata);
7456 }
7457
7458 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7459 {
7460 switch(task->tk_status) {
7461 case -NFS4ERR_DELAY:
7462 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7463 return -EAGAIN;
7464 default:
7465 nfs4_schedule_lease_recovery(clp);
7466 }
7467 return 0;
7468 }
7469
7470 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7471 {
7472 struct nfs4_sequence_data *calldata = data;
7473 struct nfs_client *clp = calldata->clp;
7474
7475 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7476 return;
7477
7478 trace_nfs4_sequence(clp, task->tk_status);
7479 if (task->tk_status < 0) {
7480 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7481 if (atomic_read(&clp->cl_count) == 1)
7482 goto out;
7483
7484 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7485 rpc_restart_call_prepare(task);
7486 return;
7487 }
7488 }
7489 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7490 out:
7491 dprintk("<-- %s\n", __func__);
7492 }
7493
7494 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7495 {
7496 struct nfs4_sequence_data *calldata = data;
7497 struct nfs_client *clp = calldata->clp;
7498 struct nfs4_sequence_args *args;
7499 struct nfs4_sequence_res *res;
7500
7501 args = task->tk_msg.rpc_argp;
7502 res = task->tk_msg.rpc_resp;
7503
7504 nfs41_setup_sequence(clp->cl_session, args, res, task);
7505 }
7506
7507 static const struct rpc_call_ops nfs41_sequence_ops = {
7508 .rpc_call_done = nfs41_sequence_call_done,
7509 .rpc_call_prepare = nfs41_sequence_prepare,
7510 .rpc_release = nfs41_sequence_release,
7511 };
7512
7513 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7514 struct rpc_cred *cred,
7515 bool is_privileged)
7516 {
7517 struct nfs4_sequence_data *calldata;
7518 struct rpc_message msg = {
7519 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7520 .rpc_cred = cred,
7521 };
7522 struct rpc_task_setup task_setup_data = {
7523 .rpc_client = clp->cl_rpcclient,
7524 .rpc_message = &msg,
7525 .callback_ops = &nfs41_sequence_ops,
7526 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7527 };
7528
7529 if (!atomic_inc_not_zero(&clp->cl_count))
7530 return ERR_PTR(-EIO);
7531 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7532 if (calldata == NULL) {
7533 nfs_put_client(clp);
7534 return ERR_PTR(-ENOMEM);
7535 }
7536 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7537 if (is_privileged)
7538 nfs4_set_sequence_privileged(&calldata->args);
7539 msg.rpc_argp = &calldata->args;
7540 msg.rpc_resp = &calldata->res;
7541 calldata->clp = clp;
7542 task_setup_data.callback_data = calldata;
7543
7544 return rpc_run_task(&task_setup_data);
7545 }
7546
7547 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7548 {
7549 struct rpc_task *task;
7550 int ret = 0;
7551
7552 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7553 return -EAGAIN;
7554 task = _nfs41_proc_sequence(clp, cred, false);
7555 if (IS_ERR(task))
7556 ret = PTR_ERR(task);
7557 else
7558 rpc_put_task_async(task);
7559 dprintk("<-- %s status=%d\n", __func__, ret);
7560 return ret;
7561 }
7562
7563 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7564 {
7565 struct rpc_task *task;
7566 int ret;
7567
7568 task = _nfs41_proc_sequence(clp, cred, true);
7569 if (IS_ERR(task)) {
7570 ret = PTR_ERR(task);
7571 goto out;
7572 }
7573 ret = rpc_wait_for_completion_task(task);
7574 if (!ret) {
7575 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7576
7577 if (task->tk_status == 0)
7578 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7579 ret = task->tk_status;
7580 }
7581 rpc_put_task(task);
7582 out:
7583 dprintk("<-- %s status=%d\n", __func__, ret);
7584 return ret;
7585 }
7586
7587 struct nfs4_reclaim_complete_data {
7588 struct nfs_client *clp;
7589 struct nfs41_reclaim_complete_args arg;
7590 struct nfs41_reclaim_complete_res res;
7591 };
7592
7593 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7594 {
7595 struct nfs4_reclaim_complete_data *calldata = data;
7596
7597 nfs41_setup_sequence(calldata->clp->cl_session,
7598 &calldata->arg.seq_args,
7599 &calldata->res.seq_res,
7600 task);
7601 }
7602
7603 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7604 {
7605 switch(task->tk_status) {
7606 case 0:
7607 case -NFS4ERR_COMPLETE_ALREADY:
7608 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7609 break;
7610 case -NFS4ERR_DELAY:
7611 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7612 /* fall through */
7613 case -NFS4ERR_RETRY_UNCACHED_REP:
7614 return -EAGAIN;
7615 default:
7616 nfs4_schedule_lease_recovery(clp);
7617 }
7618 return 0;
7619 }
7620
7621 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7622 {
7623 struct nfs4_reclaim_complete_data *calldata = data;
7624 struct nfs_client *clp = calldata->clp;
7625 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7626
7627 dprintk("--> %s\n", __func__);
7628 if (!nfs41_sequence_done(task, res))
7629 return;
7630
7631 trace_nfs4_reclaim_complete(clp, task->tk_status);
7632 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7633 rpc_restart_call_prepare(task);
7634 return;
7635 }
7636 dprintk("<-- %s\n", __func__);
7637 }
7638
7639 static void nfs4_free_reclaim_complete_data(void *data)
7640 {
7641 struct nfs4_reclaim_complete_data *calldata = data;
7642
7643 kfree(calldata);
7644 }
7645
7646 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7647 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7648 .rpc_call_done = nfs4_reclaim_complete_done,
7649 .rpc_release = nfs4_free_reclaim_complete_data,
7650 };
7651
7652 /*
7653 * Issue a global reclaim complete.
7654 */
7655 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7656 struct rpc_cred *cred)
7657 {
7658 struct nfs4_reclaim_complete_data *calldata;
7659 struct rpc_task *task;
7660 struct rpc_message msg = {
7661 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7662 .rpc_cred = cred,
7663 };
7664 struct rpc_task_setup task_setup_data = {
7665 .rpc_client = clp->cl_rpcclient,
7666 .rpc_message = &msg,
7667 .callback_ops = &nfs4_reclaim_complete_call_ops,
7668 .flags = RPC_TASK_ASYNC,
7669 };
7670 int status = -ENOMEM;
7671
7672 dprintk("--> %s\n", __func__);
7673 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7674 if (calldata == NULL)
7675 goto out;
7676 calldata->clp = clp;
7677 calldata->arg.one_fs = 0;
7678
7679 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7680 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7681 msg.rpc_argp = &calldata->arg;
7682 msg.rpc_resp = &calldata->res;
7683 task_setup_data.callback_data = calldata;
7684 task = rpc_run_task(&task_setup_data);
7685 if (IS_ERR(task)) {
7686 status = PTR_ERR(task);
7687 goto out;
7688 }
7689 status = nfs4_wait_for_completion_rpc_task(task);
7690 if (status == 0)
7691 status = task->tk_status;
7692 rpc_put_task(task);
7693 return 0;
7694 out:
7695 dprintk("<-- %s status=%d\n", __func__, status);
7696 return status;
7697 }
7698
7699 static void
7700 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7701 {
7702 struct nfs4_layoutget *lgp = calldata;
7703 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7704 struct nfs4_session *session = nfs4_get_session(server);
7705
7706 dprintk("--> %s\n", __func__);
7707 	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
7708 * right now covering the LAYOUTGET we are about to send.
7709 * However, that is not so catastrophic, and there seems
7710 * to be no way to prevent it completely.
7711 */
7712 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7713 &lgp->res.seq_res, task))
7714 return;
7715 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7716 NFS_I(lgp->args.inode)->layout,
7717 &lgp->args.range,
7718 lgp->args.ctx->state)) {
7719 rpc_exit(task, NFS4_OK);
7720 }
7721 }
7722
7723 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7724 {
7725 struct nfs4_layoutget *lgp = calldata;
7726 struct inode *inode = lgp->args.inode;
7727 struct nfs_server *server = NFS_SERVER(inode);
7728 struct pnfs_layout_hdr *lo;
7729 struct nfs4_state *state = NULL;
7730 unsigned long timeo, now, giveup;
7731
7732 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7733
7734 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7735 goto out;
7736
7737 switch (task->tk_status) {
7738 case 0:
7739 goto out;
7740 /*
7741 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7742 * (or clients) writing to the same RAID stripe
7743 */
7744 case -NFS4ERR_LAYOUTTRYLATER:
7745 /*
7746 	 * NFS4ERR_RECALLCONFLICT means we conflict with ourselves: the
7747 	 * existing layout must be recalled before a new one can be granted.
7748 */
7749 case -NFS4ERR_RECALLCONFLICT:
7750 timeo = rpc_get_timeout(task->tk_client);
7751 giveup = lgp->args.timestamp + timeo;
7752 now = jiffies;
7753 if (time_after(giveup, now)) {
7754 unsigned long delay;
7755
7756 /* Delay for:
7757 			 * - Not less than NFS4_POLL_RETRY_MIN.
7758 			 * - At most one jiffy before we give up.
7759 * - exponential backoff (time_now minus start_attempt)
7760 */
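/*
 * Worked example (assuming HZ == 1000, so NFS4_POLL_RETRY_MIN is
 * 100 jiffies): with 300 jiffies elapsed since the first attempt
 * and 2000 jiffies left before giveup, the task is re-queued after
 * max(100, min(1999, 300)) = 300 jiffies; each retry then roughly
 * doubles the wait until the giveup deadline is reached.
 */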
7761 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7762 min((giveup - now - 1),
7763 now - lgp->args.timestamp));
7764
7765 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7766 __func__, delay);
7767 rpc_delay(task, delay);
7768 task->tk_status = 0;
7769 rpc_restart_call_prepare(task);
7770 goto out; /* Do not call nfs4_async_handle_error() */
7771 }
7772 break;
7773 case -NFS4ERR_EXPIRED:
7774 case -NFS4ERR_BAD_STATEID:
7775 spin_lock(&inode->i_lock);
7776 lo = NFS_I(inode)->layout;
7777 if (!lo || list_empty(&lo->plh_segs)) {
7778 spin_unlock(&inode->i_lock);
7779 /* If the open stateid was bad, then recover it. */
7780 state = lgp->args.ctx->state;
7781 } else {
7782 LIST_HEAD(head);
7783
7784 /*
7785 * Mark the bad layout state as invalid, then retry
7786 * with the current stateid.
7787 */
7788 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7789 spin_unlock(&inode->i_lock);
7790 pnfs_free_lseg_list(&head);
7791
7792 task->tk_status = 0;
7793 rpc_restart_call_prepare(task);
7794 }
7795 }
7796 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7797 rpc_restart_call_prepare(task);
7798 out:
7799 dprintk("<-- %s\n", __func__);
7800 }
7801
7802 static size_t max_response_pages(struct nfs_server *server)
7803 {
7804 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7805 return nfs_page_array_len(0, max_resp_sz);
7806 }
7807
7808 static void nfs4_free_pages(struct page **pages, size_t size)
7809 {
7810 int i;
7811
7812 if (!pages)
7813 return;
7814
7815 for (i = 0; i < size; i++) {
7816 if (!pages[i])
7817 break;
7818 __free_page(pages[i]);
7819 }
7820 kfree(pages);
7821 }
7822
7823 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7824 {
7825 struct page **pages;
7826 int i;
7827
7828 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7829 if (!pages) {
7830 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7831 return NULL;
7832 }
7833
7834 for (i = 0; i < size; i++) {
7835 pages[i] = alloc_page(gfp_flags);
7836 if (!pages[i]) {
7837 dprintk("%s: failed to allocate page\n", __func__);
7838 nfs4_free_pages(pages, size);
7839 return NULL;
7840 }
7841 }
7842
7843 return pages;
7844 }
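/*
 * Illustrative usage, mirroring nfs4_proc_layoutget() and
 * nfs4_layoutget_release() below: the pages that will hold the
 * LAYOUTGET reply are allocated up front and freed by the release path:
 *
 *	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
 *	if (!lgp->args.layout.pages) {
 *		nfs4_layoutget_release(lgp);
 *		return ERR_PTR(-ENOMEM);
 *	}
 *	...
 *	nfs4_free_pages(lgp->args.layout.pages, max_pages);
 */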
7845
7846 static void nfs4_layoutget_release(void *calldata)
7847 {
7848 struct nfs4_layoutget *lgp = calldata;
7849 struct inode *inode = lgp->args.inode;
7850 struct nfs_server *server = NFS_SERVER(inode);
7851 size_t max_pages = max_response_pages(server);
7852
7853 dprintk("--> %s\n", __func__);
7854 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7855 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7856 put_nfs_open_context(lgp->args.ctx);
7857 kfree(calldata);
7858 dprintk("<-- %s\n", __func__);
7859 }
7860
7861 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7862 .rpc_call_prepare = nfs4_layoutget_prepare,
7863 .rpc_call_done = nfs4_layoutget_done,
7864 .rpc_release = nfs4_layoutget_release,
7865 };
7866
7867 struct pnfs_layout_segment *
7868 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7869 {
7870 struct inode *inode = lgp->args.inode;
7871 struct nfs_server *server = NFS_SERVER(inode);
7872 size_t max_pages = max_response_pages(server);
7873 struct rpc_task *task;
7874 struct rpc_message msg = {
7875 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7876 .rpc_argp = &lgp->args,
7877 .rpc_resp = &lgp->res,
7878 .rpc_cred = lgp->cred,
7879 };
7880 struct rpc_task_setup task_setup_data = {
7881 .rpc_client = server->client,
7882 .rpc_message = &msg,
7883 .callback_ops = &nfs4_layoutget_call_ops,
7884 .callback_data = lgp,
7885 .flags = RPC_TASK_ASYNC,
7886 };
7887 struct pnfs_layout_segment *lseg = NULL;
7888 int status = 0;
7889
7890 dprintk("--> %s\n", __func__);
7891
7892 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7893 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7894
7895 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7896 if (!lgp->args.layout.pages) {
7897 nfs4_layoutget_release(lgp);
7898 return ERR_PTR(-ENOMEM);
7899 }
7900 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7901 lgp->args.timestamp = jiffies;
7902
7903 lgp->res.layoutp = &lgp->args.layout;
7904 lgp->res.seq_res.sr_slot = NULL;
7905 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7906
7907 task = rpc_run_task(&task_setup_data);
7908 if (IS_ERR(task))
7909 return ERR_CAST(task);
7910 status = nfs4_wait_for_completion_rpc_task(task);
7911 if (status == 0)
7912 status = task->tk_status;
7913 trace_nfs4_layoutget(lgp->args.ctx,
7914 &lgp->args.range,
7915 &lgp->res.range,
7916 status);
7917 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7918 if (status == 0 && lgp->res.layoutp->len)
7919 lseg = pnfs_layout_process(lgp);
7920 rpc_put_task(task);
7921 dprintk("<-- %s status=%d\n", __func__, status);
7922 if (status)
7923 return ERR_PTR(status);
7924 return lseg;
7925 }
7926
7927 static void
7928 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
7929 {
7930 struct nfs4_layoutreturn *lrp = calldata;
7931
7932 dprintk("--> %s\n", __func__);
7933 nfs41_setup_sequence(lrp->clp->cl_session,
7934 &lrp->args.seq_args,
7935 &lrp->res.seq_res,
7936 task);
7937 }
7938
7939 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7940 {
7941 struct nfs4_layoutreturn *lrp = calldata;
7942 struct nfs_server *server;
7943
7944 dprintk("--> %s\n", __func__);
7945
7946 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
7947 return;
7948
7949 server = NFS_SERVER(lrp->args.inode);
7950 switch (task->tk_status) {
7951 default:
7952 task->tk_status = 0;
7953 case 0:
7954 break;
7955 case -NFS4ERR_DELAY:
7956 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
7957 break;
7958 rpc_restart_call_prepare(task);
7959 return;
7960 }
7961 dprintk("<-- %s\n", __func__);
7962 }
7963
7964 static void nfs4_layoutreturn_release(void *calldata)
7965 {
7966 struct nfs4_layoutreturn *lrp = calldata;
7967 struct pnfs_layout_hdr *lo = lrp->args.layout;
7968
7969 dprintk("--> %s\n", __func__);
7970 spin_lock(&lo->plh_inode->i_lock);
7971 if (lrp->res.lrs_present)
7972 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7973 pnfs_clear_layoutreturn_waitbit(lo);
7974 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7975 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7976 lo->plh_block_lgets--;
7977 spin_unlock(&lo->plh_inode->i_lock);
7978 pnfs_put_layout_hdr(lrp->args.layout);
7979 nfs_iput_and_deactive(lrp->inode);
7980 kfree(calldata);
7981 dprintk("<-- %s\n", __func__);
7982 }
7983
7984 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
7985 .rpc_call_prepare = nfs4_layoutreturn_prepare,
7986 .rpc_call_done = nfs4_layoutreturn_done,
7987 .rpc_release = nfs4_layoutreturn_release,
7988 };
7989
7990 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
7991 {
7992 struct rpc_task *task;
7993 struct rpc_message msg = {
7994 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
7995 .rpc_argp = &lrp->args,
7996 .rpc_resp = &lrp->res,
7997 .rpc_cred = lrp->cred,
7998 };
7999 struct rpc_task_setup task_setup_data = {
8000 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8001 .rpc_message = &msg,
8002 .callback_ops = &nfs4_layoutreturn_call_ops,
8003 .callback_data = lrp,
8004 };
8005 int status = 0;
8006
8007 dprintk("--> %s\n", __func__);
8008 if (!sync) {
8009 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8010 if (!lrp->inode) {
8011 nfs4_layoutreturn_release(lrp);
8012 return -EAGAIN;
8013 }
8014 task_setup_data.flags |= RPC_TASK_ASYNC;
8015 }
8016 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8017 task = rpc_run_task(&task_setup_data);
8018 if (IS_ERR(task))
8019 return PTR_ERR(task);
8020 if (sync)
8021 status = task->tk_status;
8022 trace_nfs4_layoutreturn(lrp->args.inode, status);
8023 dprintk("<-- %s status=%d\n", __func__, status);
8024 rpc_put_task(task);
8025 return status;
8026 }
8027
8028 static int
8029 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8030 struct pnfs_device *pdev,
8031 struct rpc_cred *cred)
8032 {
8033 struct nfs4_getdeviceinfo_args args = {
8034 .pdev = pdev,
8035 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8036 NOTIFY_DEVICEID4_DELETE,
8037 };
8038 struct nfs4_getdeviceinfo_res res = {
8039 .pdev = pdev,
8040 };
8041 struct rpc_message msg = {
8042 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8043 .rpc_argp = &args,
8044 .rpc_resp = &res,
8045 .rpc_cred = cred,
8046 };
8047 int status;
8048
8049 dprintk("--> %s\n", __func__);
8050 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8051 if (res.notification & ~args.notify_types)
8052 dprintk("%s: unsupported notification\n", __func__);
8053 if (res.notification != args.notify_types)
8054 pdev->nocache = 1;
8055
8056 dprintk("<-- %s status=%d\n", __func__, status);
8057
8058 return status;
8059 }
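/*
 * Note on the notification handling above: the server may grant only a
 * subset of the requested NOTIFY_DEVICEID4_* types (extra bits are just
 * logged); if the granted set differs from what was asked for, the
 * device info is flagged nocache so the result is not cached, since
 * change/delete notifications cannot be relied upon.
 */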
8060
8061 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8062 struct pnfs_device *pdev,
8063 struct rpc_cred *cred)
8064 {
8065 struct nfs4_exception exception = { };
8066 int err;
8067
8068 do {
8069 err = nfs4_handle_exception(server,
8070 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8071 &exception);
8072 } while (exception.retry);
8073 return err;
8074 }
8075 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8076
8077 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8078 {
8079 struct nfs4_layoutcommit_data *data = calldata;
8080 struct nfs_server *server = NFS_SERVER(data->args.inode);
8081 struct nfs4_session *session = nfs4_get_session(server);
8082
8083 nfs41_setup_sequence(session,
8084 &data->args.seq_args,
8085 &data->res.seq_res,
8086 task);
8087 }
8088
8089 static void
8090 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8091 {
8092 struct nfs4_layoutcommit_data *data = calldata;
8093 struct nfs_server *server = NFS_SERVER(data->args.inode);
8094
8095 if (!nfs41_sequence_done(task, &data->res.seq_res))
8096 return;
8097
8098 switch (task->tk_status) { /* Just ignore these failures */
8099 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8100 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8101 case -NFS4ERR_BADLAYOUT: /* no layout */
8102 	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
8103 task->tk_status = 0;
8104 case 0:
8105 break;
8106 default:
8107 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8108 rpc_restart_call_prepare(task);
8109 return;
8110 }
8111 }
8112 }
8113
8114 static void nfs4_layoutcommit_release(void *calldata)
8115 {
8116 struct nfs4_layoutcommit_data *data = calldata;
8117
8118 pnfs_cleanup_layoutcommit(data);
8119 nfs_post_op_update_inode_force_wcc(data->args.inode,
8120 data->res.fattr);
8121 put_rpccred(data->cred);
8122 nfs_iput_and_deactive(data->inode);
8123 kfree(data);
8124 }
8125
8126 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8127 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8128 .rpc_call_done = nfs4_layoutcommit_done,
8129 .rpc_release = nfs4_layoutcommit_release,
8130 };
8131
8132 int
8133 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8134 {
8135 struct rpc_message msg = {
8136 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8137 .rpc_argp = &data->args,
8138 .rpc_resp = &data->res,
8139 .rpc_cred = data->cred,
8140 };
8141 struct rpc_task_setup task_setup_data = {
8142 .task = &data->task,
8143 .rpc_client = NFS_CLIENT(data->args.inode),
8144 .rpc_message = &msg,
8145 .callback_ops = &nfs4_layoutcommit_ops,
8146 .callback_data = data,
8147 };
8148 struct rpc_task *task;
8149 int status = 0;
8150
8151 dprintk("NFS: initiating layoutcommit call. sync %d "
8152 "lbw: %llu inode %lu\n", sync,
8153 data->args.lastbytewritten,
8154 data->args.inode->i_ino);
8155
8156 if (!sync) {
8157 data->inode = nfs_igrab_and_active(data->args.inode);
8158 if (data->inode == NULL) {
8159 nfs4_layoutcommit_release(data);
8160 return -EAGAIN;
8161 }
8162 task_setup_data.flags = RPC_TASK_ASYNC;
8163 }
8164 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8165 task = rpc_run_task(&task_setup_data);
8166 if (IS_ERR(task))
8167 return PTR_ERR(task);
8168 if (sync)
8169 status = task->tk_status;
8170 trace_nfs4_layoutcommit(data->args.inode, status);
8171 dprintk("%s: status %d\n", __func__, status);
8172 rpc_put_task(task);
8173 return status;
8174 }
8175
8176 /**
8177  * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8178 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8179 */
8180 static int
8181 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8182 struct nfs_fsinfo *info,
8183 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8184 {
8185 struct nfs41_secinfo_no_name_args args = {
8186 .style = SECINFO_STYLE_CURRENT_FH,
8187 };
8188 struct nfs4_secinfo_res res = {
8189 .flavors = flavors,
8190 };
8191 struct rpc_message msg = {
8192 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8193 .rpc_argp = &args,
8194 .rpc_resp = &res,
8195 };
8196 struct rpc_clnt *clnt = server->client;
8197 struct rpc_cred *cred = NULL;
8198 int status;
8199
8200 if (use_integrity) {
8201 clnt = server->nfs_client->cl_rpcclient;
8202 cred = nfs4_get_clid_cred(server->nfs_client);
8203 msg.rpc_cred = cred;
8204 }
8205
8206 dprintk("--> %s\n", __func__);
8207 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8208 &res.seq_res, 0);
8209 dprintk("<-- %s status=%d\n", __func__, status);
8210
8211 if (cred)
8212 put_rpccred(cred);
8213
8214 return status;
8215 }
8216
8217 static int
8218 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8219 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8220 {
8221 struct nfs4_exception exception = { };
8222 int err;
8223 do {
8224 /* first try using integrity protection */
8225 err = -NFS4ERR_WRONGSEC;
8226
8227 /* try to use integrity protection with machine cred */
8228 if (_nfs4_is_integrity_protected(server->nfs_client))
8229 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8230 flavors, true);
8231
8232 /*
8233 * if unable to use integrity protection, or SECINFO with
8234 * integrity protection returns NFS4ERR_WRONGSEC (which is
8235 * disallowed by spec, but exists in deployed servers) use
8236 * the current filesystem's rpc_client and the user cred.
8237 */
8238 if (err == -NFS4ERR_WRONGSEC)
8239 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8240 flavors, false);
8241
8242 switch (err) {
8243 case 0:
8244 case -NFS4ERR_WRONGSEC:
8245 case -ENOTSUPP:
8246 goto out;
8247 default:
8248 err = nfs4_handle_exception(server, err, &exception);
8249 }
8250 } while (exception.retry);
8251 out:
8252 return err;
8253 }
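/*
 * In short: SECINFO_NO_NAME is first tried over the lease management
 * rpc_client with the machine credential (integrity protected) when the
 * transport supports it, and only if that is unavailable or the server
 * (improperly) answers NFS4ERR_WRONGSEC do we retry over the
 * filesystem's own rpc_client with the user's credential.
 */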
8254
8255 static int
8256 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8257 struct nfs_fsinfo *info)
8258 {
8259 int err;
8260 struct page *page;
8261 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8262 struct nfs4_secinfo_flavors *flavors;
8263 struct nfs4_secinfo4 *secinfo;
8264 int i;
8265
8266 page = alloc_page(GFP_KERNEL);
8267 if (!page) {
8268 err = -ENOMEM;
8269 goto out;
8270 }
8271
8272 flavors = page_address(page);
8273 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8274
8275 /*
8276 * Fall back on "guess and check" method if
8277 * the server doesn't support SECINFO_NO_NAME
8278 */
8279 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8280 err = nfs4_find_root_sec(server, fhandle, info);
8281 goto out_freepage;
8282 }
8283 if (err)
8284 goto out_freepage;
8285
8286 for (i = 0; i < flavors->num_flavors; i++) {
8287 secinfo = &flavors->flavors[i];
8288
8289 switch (secinfo->flavor) {
8290 case RPC_AUTH_NULL:
8291 case RPC_AUTH_UNIX:
8292 case RPC_AUTH_GSS:
8293 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8294 &secinfo->flavor_info);
8295 break;
8296 default:
8297 flavor = RPC_AUTH_MAXFLAVOR;
8298 break;
8299 }
8300
8301 if (!nfs_auth_info_match(&server->auth_info, flavor))
8302 flavor = RPC_AUTH_MAXFLAVOR;
8303
8304 if (flavor != RPC_AUTH_MAXFLAVOR) {
8305 err = nfs4_lookup_root_sec(server, fhandle,
8306 info, flavor);
8307 if (!err)
8308 break;
8309 }
8310 }
8311
8312 if (flavor == RPC_AUTH_MAXFLAVOR)
8313 err = -EPERM;
8314
8315 out_freepage:
8316 put_page(page);
8317 if (err == -EACCES)
8318 return -EPERM;
8319 out:
8320 return err;
8321 }
8322
8323 static int _nfs41_test_stateid(struct nfs_server *server,
8324 nfs4_stateid *stateid,
8325 struct rpc_cred *cred)
8326 {
8327 int status;
8328 struct nfs41_test_stateid_args args = {
8329 .stateid = stateid,
8330 };
8331 struct nfs41_test_stateid_res res;
8332 struct rpc_message msg = {
8333 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8334 .rpc_argp = &args,
8335 .rpc_resp = &res,
8336 .rpc_cred = cred,
8337 };
8338 struct rpc_clnt *rpc_client = server->client;
8339
8340 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8341 &rpc_client, &msg);
8342
8343 dprintk("NFS call test_stateid %p\n", stateid);
8344 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8345 nfs4_set_sequence_privileged(&args.seq_args);
8346 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8347 &args.seq_args, &res.seq_res);
8348 if (status != NFS_OK) {
8349 dprintk("NFS reply test_stateid: failed, %d\n", status);
8350 return status;
8351 }
8352 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8353 return -res.status;
8354 }
8355
8356 /**
8357 * nfs41_test_stateid - perform a TEST_STATEID operation
8358 *
8359 * @server: server / transport on which to perform the operation
8360 * @stateid: state ID to test
8361 * @cred: credential
8362 *
8363 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8364 * Otherwise a negative NFS4ERR value is returned if the operation
8365 * failed or the state ID is not currently valid.
8366 */
8367 static int nfs41_test_stateid(struct nfs_server *server,
8368 nfs4_stateid *stateid,
8369 struct rpc_cred *cred)
8370 {
8371 struct nfs4_exception exception = { };
8372 int err;
8373 do {
8374 err = _nfs41_test_stateid(server, stateid, cred);
8375 if (err != -NFS4ERR_DELAY)
8376 break;
8377 nfs4_handle_exception(server, err, &exception);
8378 } while (exception.retry);
8379 return err;
8380 }
8381
8382 struct nfs_free_stateid_data {
8383 struct nfs_server *server;
8384 struct nfs41_free_stateid_args args;
8385 struct nfs41_free_stateid_res res;
8386 };
8387
8388 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8389 {
8390 struct nfs_free_stateid_data *data = calldata;
8391 nfs41_setup_sequence(nfs4_get_session(data->server),
8392 &data->args.seq_args,
8393 &data->res.seq_res,
8394 task);
8395 }
8396
8397 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8398 {
8399 struct nfs_free_stateid_data *data = calldata;
8400
8401 nfs41_sequence_done(task, &data->res.seq_res);
8402
8403 switch (task->tk_status) {
8404 case -NFS4ERR_DELAY:
8405 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8406 rpc_restart_call_prepare(task);
8407 }
8408 }
8409
8410 static void nfs41_free_stateid_release(void *calldata)
8411 {
8412 kfree(calldata);
8413 }
8414
8415 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8416 .rpc_call_prepare = nfs41_free_stateid_prepare,
8417 .rpc_call_done = nfs41_free_stateid_done,
8418 .rpc_release = nfs41_free_stateid_release,
8419 };
8420
8421 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8422 nfs4_stateid *stateid,
8423 struct rpc_cred *cred,
8424 bool privileged)
8425 {
8426 struct rpc_message msg = {
8427 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8428 .rpc_cred = cred,
8429 };
8430 struct rpc_task_setup task_setup = {
8431 .rpc_client = server->client,
8432 .rpc_message = &msg,
8433 .callback_ops = &nfs41_free_stateid_ops,
8434 .flags = RPC_TASK_ASYNC,
8435 };
8436 struct nfs_free_stateid_data *data;
8437
8438 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8439 &task_setup.rpc_client, &msg);
8440
8441 dprintk("NFS call free_stateid %p\n", stateid);
8442 data = kmalloc(sizeof(*data), GFP_NOFS);
8443 if (!data)
8444 return ERR_PTR(-ENOMEM);
8445 data->server = server;
8446 nfs4_stateid_copy(&data->args.stateid, stateid);
8447
8448 task_setup.callback_data = data;
8449
8450 msg.rpc_argp = &data->args;
8451 msg.rpc_resp = &data->res;
8452 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8453 if (privileged)
8454 nfs4_set_sequence_privileged(&data->args.seq_args);
8455
8456 return rpc_run_task(&task_setup);
8457 }
8458
8459 /**
8460 * nfs41_free_stateid - perform a FREE_STATEID operation
8461 *
8462 * @server: server / transport on which to perform the operation
8463 * @stateid: state ID to release
8464 * @cred: credential
8465 *
8466 * Returns NFS_OK if the server freed "stateid". Otherwise a
8467 * negative NFS4ERR value is returned.
8468 */
8469 static int nfs41_free_stateid(struct nfs_server *server,
8470 nfs4_stateid *stateid,
8471 struct rpc_cred *cred)
8472 {
8473 struct rpc_task *task;
8474 int ret;
8475
8476 task = _nfs41_free_stateid(server, stateid, cred, true);
8477 if (IS_ERR(task))
8478 return PTR_ERR(task);
8479 ret = rpc_wait_for_completion_task(task);
8480 if (!ret)
8481 ret = task->tk_status;
8482 rpc_put_task(task);
8483 return ret;
8484 }
8485
8486 static void
8487 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8488 {
8489 struct rpc_task *task;
8490 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8491
8492 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8493 nfs4_free_lock_state(server, lsp);
8494 if (IS_ERR(task))
8495 return;
8496 rpc_put_task(task);
8497 }
8498
8499 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8500 const nfs4_stateid *s2)
8501 {
8502 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8503 return false;
8504
8505 if (s1->seqid == s2->seqid)
8506 return true;
8507 if (s1->seqid == 0 || s2->seqid == 0)
8508 return true;
8509
8510 return false;
8511 }
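/*
 * Example of the NFSv4.1 matching rules above: two stateids with the
 * same "other" field match when their seqids are equal, and a seqid of
 * zero acts as a wild card, so { other = X, seqid = 0 } matches
 * { other = X, seqid = 5 }, while { other = X, seqid = 3 } does not.
 */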
8512
8513 #endif /* CONFIG_NFS_V4_1 */
8514
8515 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8516 const nfs4_stateid *s2)
8517 {
8518 return nfs4_stateid_match(s1, s2);
8519 }
8520
8521
8522 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8523 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8524 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8525 .recover_open = nfs4_open_reclaim,
8526 .recover_lock = nfs4_lock_reclaim,
8527 .establish_clid = nfs4_init_clientid,
8528 .detect_trunking = nfs40_discover_server_trunking,
8529 };
8530
8531 #if defined(CONFIG_NFS_V4_1)
8532 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8533 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8534 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8535 .recover_open = nfs4_open_reclaim,
8536 .recover_lock = nfs4_lock_reclaim,
8537 .establish_clid = nfs41_init_clientid,
8538 .reclaim_complete = nfs41_proc_reclaim_complete,
8539 .detect_trunking = nfs41_discover_server_trunking,
8540 };
8541 #endif /* CONFIG_NFS_V4_1 */
8542
8543 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8544 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8545 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8546 .recover_open = nfs40_open_expired,
8547 .recover_lock = nfs4_lock_expired,
8548 .establish_clid = nfs4_init_clientid,
8549 };
8550
8551 #if defined(CONFIG_NFS_V4_1)
8552 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8553 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8554 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8555 .recover_open = nfs41_open_expired,
8556 .recover_lock = nfs41_lock_expired,
8557 .establish_clid = nfs41_init_clientid,
8558 };
8559 #endif /* CONFIG_NFS_V4_1 */
8560
8561 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8562 .sched_state_renewal = nfs4_proc_async_renew,
8563 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8564 .renew_lease = nfs4_proc_renew,
8565 };
8566
8567 #if defined(CONFIG_NFS_V4_1)
8568 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8569 .sched_state_renewal = nfs41_proc_async_sequence,
8570 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8571 .renew_lease = nfs4_proc_sequence,
8572 };
8573 #endif
8574
8575 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8576 .get_locations = _nfs40_proc_get_locations,
8577 .fsid_present = _nfs40_proc_fsid_present,
8578 };
8579
8580 #if defined(CONFIG_NFS_V4_1)
8581 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8582 .get_locations = _nfs41_proc_get_locations,
8583 .fsid_present = _nfs41_proc_fsid_present,
8584 };
8585 #endif /* CONFIG_NFS_V4_1 */
8586
8587 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8588 .minor_version = 0,
8589 .init_caps = NFS_CAP_READDIRPLUS
8590 | NFS_CAP_ATOMIC_OPEN
8591 | NFS_CAP_CHANGE_ATTR
8592 | NFS_CAP_POSIX_LOCK,
8593 .init_client = nfs40_init_client,
8594 .shutdown_client = nfs40_shutdown_client,
8595 .match_stateid = nfs4_match_stateid,
8596 .find_root_sec = nfs4_find_root_sec,
8597 .free_lock_state = nfs4_release_lockowner,
8598 .alloc_seqid = nfs_alloc_seqid,
8599 .call_sync_ops = &nfs40_call_sync_ops,
8600 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8601 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8602 .state_renewal_ops = &nfs40_state_renewal_ops,
8603 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8604 };
8605
8606 #if defined(CONFIG_NFS_V4_1)
8607 static struct nfs_seqid *
8608 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8609 {
8610 return NULL;
8611 }
8612
8613 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8614 .minor_version = 1,
8615 .init_caps = NFS_CAP_READDIRPLUS
8616 | NFS_CAP_ATOMIC_OPEN
8617 | NFS_CAP_CHANGE_ATTR
8618 | NFS_CAP_POSIX_LOCK
8619 | NFS_CAP_STATEID_NFSV41
8620 | NFS_CAP_ATOMIC_OPEN_V1,
8621 .init_client = nfs41_init_client,
8622 .shutdown_client = nfs41_shutdown_client,
8623 .match_stateid = nfs41_match_stateid,
8624 .find_root_sec = nfs41_find_root_sec,
8625 .free_lock_state = nfs41_free_lock_state,
8626 .alloc_seqid = nfs_alloc_no_seqid,
8627 .call_sync_ops = &nfs41_call_sync_ops,
8628 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8629 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8630 .state_renewal_ops = &nfs41_state_renewal_ops,
8631 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8632 };
8633 #endif
8634
8635 #if defined(CONFIG_NFS_V4_2)
8636 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8637 .minor_version = 2,
8638 .init_caps = NFS_CAP_READDIRPLUS
8639 | NFS_CAP_ATOMIC_OPEN
8640 | NFS_CAP_CHANGE_ATTR
8641 | NFS_CAP_POSIX_LOCK
8642 | NFS_CAP_STATEID_NFSV41
8643 | NFS_CAP_ATOMIC_OPEN_V1
8644 | NFS_CAP_ALLOCATE
8645 | NFS_CAP_DEALLOCATE
8646 | NFS_CAP_SEEK
8647 | NFS_CAP_LAYOUTSTATS,
8648 .init_client = nfs41_init_client,
8649 .shutdown_client = nfs41_shutdown_client,
8650 .match_stateid = nfs41_match_stateid,
8651 .find_root_sec = nfs41_find_root_sec,
8652 .free_lock_state = nfs41_free_lock_state,
8653 .call_sync_ops = &nfs41_call_sync_ops,
8654 .alloc_seqid = nfs_alloc_no_seqid,
8655 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8656 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8657 .state_renewal_ops = &nfs41_state_renewal_ops,
8658 };
8659 #endif
8660
8661 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8662 [0] = &nfs_v4_0_minor_ops,
8663 #if defined(CONFIG_NFS_V4_1)
8664 [1] = &nfs_v4_1_minor_ops,
8665 #endif
8666 #if defined(CONFIG_NFS_V4_2)
8667 [2] = &nfs_v4_2_minor_ops,
8668 #endif
8669 };
8670
8671 static const struct inode_operations nfs4_dir_inode_operations = {
8672 .create = nfs_create,
8673 .lookup = nfs_lookup,
8674 .atomic_open = nfs_atomic_open,
8675 .link = nfs_link,
8676 .unlink = nfs_unlink,
8677 .symlink = nfs_symlink,
8678 .mkdir = nfs_mkdir,
8679 .rmdir = nfs_rmdir,
8680 .mknod = nfs_mknod,
8681 .rename = nfs_rename,
8682 .permission = nfs_permission,
8683 .getattr = nfs_getattr,
8684 .setattr = nfs_setattr,
8685 .getxattr = generic_getxattr,
8686 .setxattr = generic_setxattr,
8687 .listxattr = generic_listxattr,
8688 .removexattr = generic_removexattr,
8689 };
8690
8691 static const struct inode_operations nfs4_file_inode_operations = {
8692 .permission = nfs_permission,
8693 .getattr = nfs_getattr,
8694 .setattr = nfs_setattr,
8695 .getxattr = generic_getxattr,
8696 .setxattr = generic_setxattr,
8697 .listxattr = generic_listxattr,
8698 .removexattr = generic_removexattr,
8699 };
8700
8701 const struct nfs_rpc_ops nfs_v4_clientops = {
8702 .version = 4, /* protocol version */
8703 .dentry_ops = &nfs4_dentry_operations,
8704 .dir_inode_ops = &nfs4_dir_inode_operations,
8705 .file_inode_ops = &nfs4_file_inode_operations,
8706 .file_ops = &nfs4_file_operations,
8707 .getroot = nfs4_proc_get_root,
8708 .submount = nfs4_submount,
8709 .try_mount = nfs4_try_mount,
8710 .getattr = nfs4_proc_getattr,
8711 .setattr = nfs4_proc_setattr,
8712 .lookup = nfs4_proc_lookup,
8713 .access = nfs4_proc_access,
8714 .readlink = nfs4_proc_readlink,
8715 .create = nfs4_proc_create,
8716 .remove = nfs4_proc_remove,
8717 .unlink_setup = nfs4_proc_unlink_setup,
8718 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8719 .unlink_done = nfs4_proc_unlink_done,
8720 .rename_setup = nfs4_proc_rename_setup,
8721 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8722 .rename_done = nfs4_proc_rename_done,
8723 .link = nfs4_proc_link,
8724 .symlink = nfs4_proc_symlink,
8725 .mkdir = nfs4_proc_mkdir,
8726 .rmdir = nfs4_proc_remove,
8727 .readdir = nfs4_proc_readdir,
8728 .mknod = nfs4_proc_mknod,
8729 .statfs = nfs4_proc_statfs,
8730 .fsinfo = nfs4_proc_fsinfo,
8731 .pathconf = nfs4_proc_pathconf,
8732 .set_capabilities = nfs4_server_capabilities,
8733 .decode_dirent = nfs4_decode_dirent,
8734 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8735 .read_setup = nfs4_proc_read_setup,
8736 .read_done = nfs4_read_done,
8737 .write_setup = nfs4_proc_write_setup,
8738 .write_done = nfs4_write_done,
8739 .commit_setup = nfs4_proc_commit_setup,
8740 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8741 .commit_done = nfs4_commit_done,
8742 .lock = nfs4_proc_lock,
8743 .clear_acl_cache = nfs4_zap_acl_attr,
8744 .close_context = nfs4_close_context,
8745 .open_context = nfs4_atomic_open,
8746 .have_delegation = nfs4_have_delegation,
8747 .return_delegation = nfs4_inode_return_delegation,
8748 .alloc_client = nfs4_alloc_client,
8749 .init_client = nfs4_init_client,
8750 .free_client = nfs4_free_client,
8751 .create_server = nfs4_create_server,
8752 .clone_server = nfs_clone_server,
8753 };
8754
8755 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8756 .prefix = XATTR_NAME_NFSV4_ACL,
8757 .list = nfs4_xattr_list_nfs4_acl,
8758 .get = nfs4_xattr_get_nfs4_acl,
8759 .set = nfs4_xattr_set_nfs4_acl,
8760 };
8761
8762 const struct xattr_handler *nfs4_xattr_handlers[] = {
8763 &nfs4_xattr_nfs4_acl_handler,
8764 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8765 &nfs4_xattr_nfs4_label_handler,
8766 #endif
8767 NULL
8768 };
8769
8770 /*
8771 * Local variables:
8772 * c-basic-offset: 8
8773 * End:
8774 */