1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/nfs_idmap.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4session.h"
67 #include "fscache.h"
68
69 #include "nfs4trace.h"
70
71 #define NFSDBG_FACILITY NFSDBG_PROC
72
73 #define NFS4_POLL_RETRY_MIN (HZ/10)
74 #define NFS4_POLL_RETRY_MAX (15*HZ)
75
76 struct nfs4_opendata;
77 static int _nfs4_proc_open(struct nfs4_opendata *data);
78 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
79 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
80 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
81 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
82 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
83 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
84 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state, struct nfs4_label *ilabel,
87 struct nfs4_label *olabel);
88 #ifdef CONFIG_NFS_V4_1
89 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
90 struct rpc_cred *);
91 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
92 struct rpc_cred *);
93 #endif
94
95 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
96 static inline struct nfs4_label *
97 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
98 struct iattr *sattr, struct nfs4_label *label)
99 {
100 int err;
101
102 if (label == NULL)
103 return NULL;
104
105 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
106 return NULL;
107
108 err = security_dentry_init_security(dentry, sattr->ia_mode,
109 &dentry->d_name, (void **)&label->label, &label->len);
110 if (err == 0)
111 return label;
112
113 return NULL;
114 }
115 static inline void
116 nfs4_label_release_security(struct nfs4_label *label)
117 {
118 if (label)
119 security_release_secctx(label->label, label->len);
120 }
121 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
122 {
123 if (label)
124 return server->attr_bitmask;
125
126 return server->attr_bitmask_nl;
127 }
128 #else
129 static inline struct nfs4_label *
130 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
131 struct iattr *sattr, struct nfs4_label *l)
132 { return NULL; }
133 static inline void
134 nfs4_label_release_security(struct nfs4_label *label)
135 { return; }
136 static inline u32 *
137 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
138 { return server->attr_bitmask; }
139 #endif
140
141 /* Prevent leaks of NFSv4 errors into userland */
142 static int nfs4_map_errors(int err)
143 {
144 if (err >= -1000)
145 return err;
146 switch (err) {
147 case -NFS4ERR_RESOURCE:
148 case -NFS4ERR_LAYOUTTRYLATER:
149 case -NFS4ERR_RECALLCONFLICT:
150 return -EREMOTEIO;
151 case -NFS4ERR_WRONGSEC:
152 case -NFS4ERR_WRONG_CRED:
153 return -EPERM;
154 case -NFS4ERR_BADOWNER:
155 case -NFS4ERR_BADNAME:
156 return -EINVAL;
157 case -NFS4ERR_SHARE_DENIED:
158 return -EACCES;
159 case -NFS4ERR_MINOR_VERS_MISMATCH:
160 return -EPROTONOSUPPORT;
161 case -NFS4ERR_FILE_OPEN:
162 return -EBUSY;
163 default:
164 dprintk("%s could not handle NFSv4 error %d\n",
165 __func__, -err);
166 break;
167 }
168 return -EIO;
169 }
170
171 /*
172 * This is our standard bitmap for GETATTR requests.
173 */
174 const u32 nfs4_fattr_bitmap[3] = {
175 FATTR4_WORD0_TYPE
176 | FATTR4_WORD0_CHANGE
177 | FATTR4_WORD0_SIZE
178 | FATTR4_WORD0_FSID
179 | FATTR4_WORD0_FILEID,
180 FATTR4_WORD1_MODE
181 | FATTR4_WORD1_NUMLINKS
182 | FATTR4_WORD1_OWNER
183 | FATTR4_WORD1_OWNER_GROUP
184 | FATTR4_WORD1_RAWDEV
185 | FATTR4_WORD1_SPACE_USED
186 | FATTR4_WORD1_TIME_ACCESS
187 | FATTR4_WORD1_TIME_METADATA
188 | FATTR4_WORD1_TIME_MODIFY
189 | FATTR4_WORD1_MOUNTED_ON_FILEID,
190 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
191 FATTR4_WORD2_SECURITY_LABEL
192 #endif
193 };
194
195 static const u32 nfs4_pnfs_open_bitmap[3] = {
196 FATTR4_WORD0_TYPE
197 | FATTR4_WORD0_CHANGE
198 | FATTR4_WORD0_SIZE
199 | FATTR4_WORD0_FSID
200 | FATTR4_WORD0_FILEID,
201 FATTR4_WORD1_MODE
202 | FATTR4_WORD1_NUMLINKS
203 | FATTR4_WORD1_OWNER
204 | FATTR4_WORD1_OWNER_GROUP
205 | FATTR4_WORD1_RAWDEV
206 | FATTR4_WORD1_SPACE_USED
207 | FATTR4_WORD1_TIME_ACCESS
208 | FATTR4_WORD1_TIME_METADATA
209 | FATTR4_WORD1_TIME_MODIFY,
210 FATTR4_WORD2_MDSTHRESHOLD
211 };
212
213 static const u32 nfs4_open_noattr_bitmap[3] = {
214 FATTR4_WORD0_TYPE
215 | FATTR4_WORD0_CHANGE
216 | FATTR4_WORD0_FILEID,
217 };
218
219 const u32 nfs4_statfs_bitmap[3] = {
220 FATTR4_WORD0_FILES_AVAIL
221 | FATTR4_WORD0_FILES_FREE
222 | FATTR4_WORD0_FILES_TOTAL,
223 FATTR4_WORD1_SPACE_AVAIL
224 | FATTR4_WORD1_SPACE_FREE
225 | FATTR4_WORD1_SPACE_TOTAL
226 };
227
228 const u32 nfs4_pathconf_bitmap[3] = {
229 FATTR4_WORD0_MAXLINK
230 | FATTR4_WORD0_MAXNAME,
231 0
232 };
233
234 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
235 | FATTR4_WORD0_MAXREAD
236 | FATTR4_WORD0_MAXWRITE
237 | FATTR4_WORD0_LEASE_TIME,
238 FATTR4_WORD1_TIME_DELTA
239 | FATTR4_WORD1_FS_LAYOUT_TYPES,
240 FATTR4_WORD2_LAYOUT_BLKSIZE
241 };
242
243 const u32 nfs4_fs_locations_bitmap[3] = {
244 FATTR4_WORD0_TYPE
245 | FATTR4_WORD0_CHANGE
246 | FATTR4_WORD0_SIZE
247 | FATTR4_WORD0_FSID
248 | FATTR4_WORD0_FILEID
249 | FATTR4_WORD0_FS_LOCATIONS,
250 FATTR4_WORD1_MODE
251 | FATTR4_WORD1_NUMLINKS
252 | FATTR4_WORD1_OWNER
253 | FATTR4_WORD1_OWNER_GROUP
254 | FATTR4_WORD1_RAWDEV
255 | FATTR4_WORD1_SPACE_USED
256 | FATTR4_WORD1_TIME_ACCESS
257 | FATTR4_WORD1_TIME_METADATA
258 | FATTR4_WORD1_TIME_MODIFY
259 | FATTR4_WORD1_MOUNTED_ON_FILEID,
260 };
261
262 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
263 struct nfs4_readdir_arg *readdir)
264 {
265 __be32 *start, *p;
266
267 if (cookie > 2) {
268 readdir->cookie = cookie;
269 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
270 return;
271 }
272
273 readdir->cookie = 0;
274 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
275 if (cookie == 2)
276 return;
277
278 /*
279 * NFSv4 servers do not return entries for '.' and '..'
280 * Therefore, we fake these entries here. We let '.'
281 * have cookie 0 and '..' have cookie 1. Note that
282 * when talking to the server, we always send cookie 0
283 * instead of 1 or 2.
284 */
285 start = p = kmap_atomic(*readdir->pages);
286
287 if (cookie == 0) {
288 *p++ = xdr_one; /* next */
289 *p++ = xdr_zero; /* cookie, first word */
290 *p++ = xdr_one; /* cookie, second word */
291 *p++ = xdr_one; /* entry len */
292 memcpy(p, ".\0\0\0", 4); /* entry */
293 p++;
294 *p++ = xdr_one; /* bitmap length */
295 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
296 *p++ = htonl(8); /* attribute buffer length */
297 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
298 }
299
300 *p++ = xdr_one; /* next */
301 *p++ = xdr_zero; /* cookie, first word */
302 *p++ = xdr_two; /* cookie, second word */
303 *p++ = xdr_two; /* entry len */
304 memcpy(p, "..\0\0", 4); /* entry */
305 p++;
306 *p++ = xdr_one; /* bitmap length */
307 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
308 *p++ = htonl(8); /* attribute buffer length */
309 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
310
311 readdir->pgbase = (char *)p - (char *)start;
312 readdir->count -= readdir->pgbase;
313 kunmap_atomic(start);
314 }
315
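/*
 * Return the current retry delay, clamped to
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX], and double *timeout for
 * the next attempt (simple exponential backoff).
 */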
316 static long nfs4_update_delay(long *timeout)
317 {
318 long ret;
319 if (!timeout)
320 return NFS4_POLL_RETRY_MAX;
321 if (*timeout <= 0)
322 *timeout = NFS4_POLL_RETRY_MIN;
323 if (*timeout > NFS4_POLL_RETRY_MAX)
324 *timeout = NFS4_POLL_RETRY_MAX;
325 ret = *timeout;
326 *timeout <<= 1;
327 return ret;
328 }
329
330 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
331 {
332 int res = 0;
333
334 might_sleep();
335
336 freezable_schedule_timeout_killable_unsafe(
337 nfs4_update_delay(timeout));
338 if (fatal_signal_pending(current))
339 res = -ERESTARTSYS;
340 return res;
341 }
342
343 /* This is the error handling routine for processes that are allowed
344 * to sleep.
345 */
346 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
347 {
348 struct nfs_client *clp = server->nfs_client;
349 struct nfs4_state *state = exception->state;
350 struct inode *inode = exception->inode;
351 int ret = errorcode;
352
353 exception->retry = 0;
354 switch(errorcode) {
355 case 0:
356 return 0;
357 case -NFS4ERR_OPENMODE:
358 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
359 nfs4_inode_return_delegation(inode);
360 exception->retry = 1;
361 return 0;
362 }
363 if (state == NULL)
364 break;
365 ret = nfs4_schedule_stateid_recovery(server, state);
366 if (ret < 0)
367 break;
368 goto wait_on_recovery;
369 case -NFS4ERR_DELEG_REVOKED:
370 case -NFS4ERR_ADMIN_REVOKED:
371 case -NFS4ERR_BAD_STATEID:
372 if (state == NULL)
373 break;
374 ret = nfs4_schedule_stateid_recovery(server, state);
375 if (ret < 0)
376 break;
377 goto wait_on_recovery;
378 case -NFS4ERR_EXPIRED:
379 if (state != NULL) {
380 ret = nfs4_schedule_stateid_recovery(server, state);
381 if (ret < 0)
382 break;
383 }
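/* Fall through: also recover the lease */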
384 case -NFS4ERR_STALE_STATEID:
385 case -NFS4ERR_STALE_CLIENTID:
386 nfs4_schedule_lease_recovery(clp);
387 goto wait_on_recovery;
388 case -NFS4ERR_MOVED:
389 ret = nfs4_schedule_migration_recovery(server);
390 if (ret < 0)
391 break;
392 goto wait_on_recovery;
393 case -NFS4ERR_LEASE_MOVED:
394 nfs4_schedule_lease_moved_recovery(clp);
395 goto wait_on_recovery;
396 #if defined(CONFIG_NFS_V4_1)
397 case -NFS4ERR_BADSESSION:
398 case -NFS4ERR_BADSLOT:
399 case -NFS4ERR_BAD_HIGH_SLOT:
400 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
401 case -NFS4ERR_DEADSESSION:
402 case -NFS4ERR_SEQ_FALSE_RETRY:
403 case -NFS4ERR_SEQ_MISORDERED:
404 dprintk("%s ERROR: %d Reset session\n", __func__,
405 errorcode);
406 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
407 goto wait_on_recovery;
408 #endif /* defined(CONFIG_NFS_V4_1) */
409 case -NFS4ERR_FILE_OPEN:
410 if (exception->timeout > HZ) {
411 /* We have retried a decent amount, time to
412 * fail
413 */
414 ret = -EBUSY;
415 break;
416 }
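/* Fall through: retry after a delay */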
417 case -NFS4ERR_GRACE:
418 case -NFS4ERR_DELAY:
419 ret = nfs4_delay(server->client, &exception->timeout);
420 if (ret != 0)
421 break;
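/* Fall through */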
422 case -NFS4ERR_RETRY_UNCACHED_REP:
423 case -NFS4ERR_OLD_STATEID:
424 exception->retry = 1;
425 break;
426 case -NFS4ERR_BADOWNER:
427 /* The following works around a Linux server bug! */
428 case -NFS4ERR_BADNAME:
429 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
430 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
431 exception->retry = 1;
432 printk(KERN_WARNING "NFS: v4 server %s "
433 "does not accept raw "
434 "uid/gids. "
435 "Reenabling the idmapper.\n",
436 server->nfs_client->cl_hostname);
437 }
438 }
439 /* We failed to handle the error */
440 return nfs4_map_errors(ret);
441 wait_on_recovery:
442 ret = nfs4_wait_clnt_recover(clp);
443 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
444 return -EIO;
445 if (ret == 0)
446 exception->retry = 1;
447 return ret;
448 }
449
450 /*
451 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
452 * or 'false' otherwise.
453 */
454 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
455 {
456 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
457
458 if (flavor == RPC_AUTH_GSS_KRB5I ||
459 flavor == RPC_AUTH_GSS_KRB5P)
460 return true;
461
462 return false;
463 }
464
465 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
466 {
467 spin_lock(&clp->cl_lock);
468 	if (time_before(clp->cl_last_renewal, timestamp))
469 clp->cl_last_renewal = timestamp;
470 spin_unlock(&clp->cl_lock);
471 }
472
473 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
474 {
475 do_renew_lease(server->nfs_client, timestamp);
476 }
477
478 struct nfs4_call_sync_data {
479 const struct nfs_server *seq_server;
480 struct nfs4_sequence_args *seq_args;
481 struct nfs4_sequence_res *seq_res;
482 };
483
484 static void nfs4_init_sequence(struct nfs4_sequence_args *args,
485 struct nfs4_sequence_res *res, int cache_reply)
486 {
487 args->sa_slot = NULL;
488 args->sa_cache_this = cache_reply;
489 args->sa_privileged = 0;
490
491 res->sr_slot = NULL;
492 }
493
494 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
495 {
496 args->sa_privileged = 1;
497 }
498
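/*
 * Reserve a slot in the NFSv4.0 slot table before starting the RPC
 * call, or put the task to sleep on the slot waitqueue if the table is
 * draining or has no free slots.
 */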
499 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
500 struct nfs4_sequence_args *args,
501 struct nfs4_sequence_res *res,
502 struct rpc_task *task)
503 {
504 struct nfs4_slot *slot;
505
506 /* slot already allocated? */
507 if (res->sr_slot != NULL)
508 goto out_start;
509
510 spin_lock(&tbl->slot_tbl_lock);
511 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
512 goto out_sleep;
513
514 slot = nfs4_alloc_slot(tbl);
515 if (IS_ERR(slot)) {
516 if (slot == ERR_PTR(-ENOMEM))
517 task->tk_timeout = HZ >> 2;
518 goto out_sleep;
519 }
520 spin_unlock(&tbl->slot_tbl_lock);
521
522 args->sa_slot = slot;
523 res->sr_slot = slot;
524
525 out_start:
526 rpc_call_start(task);
527 return 0;
528
529 out_sleep:
530 if (args->sa_privileged)
531 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
532 NULL, RPC_PRIORITY_PRIVILEGED);
533 else
534 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
535 spin_unlock(&tbl->slot_tbl_lock);
536 return -EAGAIN;
537 }
538 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
539
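/*
 * Release the NFSv4.0 slot once the call has completed, handing it
 * directly to a waiting task when possible.
 */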
540 static int nfs40_sequence_done(struct rpc_task *task,
541 struct nfs4_sequence_res *res)
542 {
543 struct nfs4_slot *slot = res->sr_slot;
544 struct nfs4_slot_table *tbl;
545
546 if (slot == NULL)
547 goto out;
548
549 tbl = slot->table;
550 spin_lock(&tbl->slot_tbl_lock);
551 if (!nfs41_wake_and_assign_slot(tbl, slot))
552 nfs4_free_slot(tbl, slot);
553 spin_unlock(&tbl->slot_tbl_lock);
554
555 res->sr_slot = NULL;
556 out:
557 return 1;
558 }
559
560 #if defined(CONFIG_NFS_V4_1)
561
562 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
563 {
564 struct nfs4_session *session;
565 struct nfs4_slot_table *tbl;
566 struct nfs4_slot *slot = res->sr_slot;
567 bool send_new_highest_used_slotid = false;
568
569 tbl = slot->table;
570 session = tbl->session;
571
572 spin_lock(&tbl->slot_tbl_lock);
573 /* Be nice to the server: try to ensure that the last transmitted
574 	 * value for highest_used_slotid <= target_highest_slotid
575 */
576 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
577 send_new_highest_used_slotid = true;
578
579 if (nfs41_wake_and_assign_slot(tbl, slot)) {
580 send_new_highest_used_slotid = false;
581 goto out_unlock;
582 }
583 nfs4_free_slot(tbl, slot);
584
585 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
586 send_new_highest_used_slotid = false;
587 out_unlock:
588 spin_unlock(&tbl->slot_tbl_lock);
589 res->sr_slot = NULL;
590 if (send_new_highest_used_slotid)
591 nfs41_server_notify_highest_slotid_update(session->clp);
592 }
593
594 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
595 {
596 struct nfs4_session *session;
597 struct nfs4_slot *slot = res->sr_slot;
598 struct nfs_client *clp;
599 bool interrupted = false;
600 int ret = 1;
601
602 if (slot == NULL)
603 goto out_noaction;
604 /* don't increment the sequence number if the task wasn't sent */
605 if (!RPC_WAS_SENT(task))
606 goto out;
607
608 session = slot->table->session;
609
610 if (slot->interrupted) {
611 slot->interrupted = 0;
612 interrupted = true;
613 }
614
615 trace_nfs4_sequence_done(session, res);
616 /* Check the SEQUENCE operation status */
617 switch (res->sr_status) {
618 case 0:
619 /* Update the slot's sequence and clientid lease timer */
620 ++slot->seq_nr;
621 clp = session->clp;
622 do_renew_lease(clp, res->sr_timestamp);
623 /* Check sequence flags */
624 if (res->sr_status_flags != 0)
625 nfs4_schedule_lease_recovery(clp);
626 nfs41_update_target_slotid(slot->table, slot, res);
627 break;
628 case 1:
629 /*
630 * sr_status remains 1 if an RPC level error occurred.
631 * The server may or may not have processed the sequence
632 	 * operation.
633 * Mark the slot as having hosted an interrupted RPC call.
634 */
635 slot->interrupted = 1;
636 goto out;
637 case -NFS4ERR_DELAY:
638 /* The server detected a resend of the RPC call and
639 * returned NFS4ERR_DELAY as per Section 2.10.6.2
640 * of RFC5661.
641 */
642 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
643 __func__,
644 slot->slot_nr,
645 slot->seq_nr);
646 goto out_retry;
647 case -NFS4ERR_BADSLOT:
648 /*
649 * The slot id we used was probably retired. Try again
650 * using a different slot id.
651 */
652 goto retry_nowait;
653 case -NFS4ERR_SEQ_MISORDERED:
654 /*
655 * Was the last operation on this sequence interrupted?
656 * If so, retry after bumping the sequence number.
657 */
658 if (interrupted) {
659 ++slot->seq_nr;
660 goto retry_nowait;
661 }
662 /*
663 * Could this slot have been previously retired?
664 * If so, then the server may be expecting seq_nr = 1!
665 */
666 if (slot->seq_nr != 1) {
667 slot->seq_nr = 1;
668 goto retry_nowait;
669 }
670 break;
671 case -NFS4ERR_SEQ_FALSE_RETRY:
672 ++slot->seq_nr;
673 goto retry_nowait;
674 default:
675 /* Just update the slot sequence no. */
676 ++slot->seq_nr;
677 }
678 out:
679 /* The session may be reset by one of the error handlers. */
680 	dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
681 nfs41_sequence_free_slot(res);
682 out_noaction:
683 return ret;
684 retry_nowait:
685 if (rpc_restart_call_prepare(task)) {
686 task->tk_status = 0;
687 ret = 0;
688 }
689 goto out;
690 out_retry:
691 if (!rpc_restart_call(task))
692 goto out;
693 rpc_delay(task, NFS4_POLL_RETRY_MAX);
694 return 0;
695 }
696 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
697
698 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
699 {
700 if (res->sr_slot == NULL)
701 return 1;
702 if (!res->sr_slot->table->session)
703 return nfs40_sequence_done(task, res);
704 return nfs41_sequence_done(task, res);
705 }
706 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
707
708 int nfs41_setup_sequence(struct nfs4_session *session,
709 struct nfs4_sequence_args *args,
710 struct nfs4_sequence_res *res,
711 struct rpc_task *task)
712 {
713 struct nfs4_slot *slot;
714 struct nfs4_slot_table *tbl;
715
716 dprintk("--> %s\n", __func__);
717 /* slot already allocated? */
718 if (res->sr_slot != NULL)
719 goto out_success;
720
721 tbl = &session->fc_slot_table;
722
723 task->tk_timeout = 0;
724
725 spin_lock(&tbl->slot_tbl_lock);
726 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
727 !args->sa_privileged) {
728 /* The state manager will wait until the slot table is empty */
729 dprintk("%s session is draining\n", __func__);
730 goto out_sleep;
731 }
732
733 slot = nfs4_alloc_slot(tbl);
734 if (IS_ERR(slot)) {
735 /* If out of memory, try again in 1/4 second */
736 if (slot == ERR_PTR(-ENOMEM))
737 task->tk_timeout = HZ >> 2;
738 dprintk("<-- %s: no free slots\n", __func__);
739 goto out_sleep;
740 }
741 spin_unlock(&tbl->slot_tbl_lock);
742
743 args->sa_slot = slot;
744
745 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
746 slot->slot_nr, slot->seq_nr);
747
748 res->sr_slot = slot;
749 res->sr_timestamp = jiffies;
750 res->sr_status_flags = 0;
751 /*
752 * sr_status is only set in decode_sequence, and so will remain
753 * set to 1 if an rpc level failure occurs.
754 */
755 res->sr_status = 1;
756 trace_nfs4_setup_sequence(session, args);
757 out_success:
758 rpc_call_start(task);
759 return 0;
760 out_sleep:
761 /* Privileged tasks are queued with top priority */
762 if (args->sa_privileged)
763 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
764 NULL, RPC_PRIORITY_PRIVILEGED);
765 else
766 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
767 spin_unlock(&tbl->slot_tbl_lock);
768 return -EAGAIN;
769 }
770 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
771
772 static int nfs4_setup_sequence(const struct nfs_server *server,
773 struct nfs4_sequence_args *args,
774 struct nfs4_sequence_res *res,
775 struct rpc_task *task)
776 {
777 struct nfs4_session *session = nfs4_get_session(server);
778 int ret = 0;
779
780 if (!session)
781 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
782 args, res, task);
783
784 dprintk("--> %s clp %p session %p sr_slot %u\n",
785 __func__, session->clp, session, res->sr_slot ?
786 res->sr_slot->slot_nr : NFS4_NO_SLOT);
787
788 ret = nfs41_setup_sequence(session, args, res, task);
789
790 dprintk("<-- %s status=%d\n", __func__, ret);
791 return ret;
792 }
793
794 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
795 {
796 struct nfs4_call_sync_data *data = calldata;
797 struct nfs4_session *session = nfs4_get_session(data->seq_server);
798
799 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
800
801 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
802 }
803
804 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
805 {
806 struct nfs4_call_sync_data *data = calldata;
807
808 nfs41_sequence_done(task, data->seq_res);
809 }
810
811 static const struct rpc_call_ops nfs41_call_sync_ops = {
812 .rpc_call_prepare = nfs41_call_sync_prepare,
813 .rpc_call_done = nfs41_call_sync_done,
814 };
815
816 #else /* !CONFIG_NFS_V4_1 */
817
818 static int nfs4_setup_sequence(const struct nfs_server *server,
819 struct nfs4_sequence_args *args,
820 struct nfs4_sequence_res *res,
821 struct rpc_task *task)
822 {
823 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
824 args, res, task);
825 }
826
827 int nfs4_sequence_done(struct rpc_task *task,
828 struct nfs4_sequence_res *res)
829 {
830 return nfs40_sequence_done(task, res);
831 }
832 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
833
834 #endif /* !CONFIG_NFS_V4_1 */
835
836 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
837 {
838 struct nfs4_call_sync_data *data = calldata;
839 nfs4_setup_sequence(data->seq_server,
840 data->seq_args, data->seq_res, task);
841 }
842
843 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
844 {
845 struct nfs4_call_sync_data *data = calldata;
846 nfs4_sequence_done(task, data->seq_res);
847 }
848
849 static const struct rpc_call_ops nfs40_call_sync_ops = {
850 .rpc_call_prepare = nfs40_call_sync_prepare,
851 .rpc_call_done = nfs40_call_sync_done,
852 };
853
854 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
855 struct nfs_server *server,
856 struct rpc_message *msg,
857 struct nfs4_sequence_args *args,
858 struct nfs4_sequence_res *res)
859 {
860 int ret;
861 struct rpc_task *task;
862 struct nfs_client *clp = server->nfs_client;
863 struct nfs4_call_sync_data data = {
864 .seq_server = server,
865 .seq_args = args,
866 .seq_res = res,
867 };
868 struct rpc_task_setup task_setup = {
869 .rpc_client = clnt,
870 .rpc_message = msg,
871 .callback_ops = clp->cl_mvops->call_sync_ops,
872 .callback_data = &data
873 };
874
875 task = rpc_run_task(&task_setup);
876 if (IS_ERR(task))
877 ret = PTR_ERR(task);
878 else {
879 ret = task->tk_status;
880 rpc_put_task(task);
881 }
882 return ret;
883 }
884
885 int nfs4_call_sync(struct rpc_clnt *clnt,
886 struct nfs_server *server,
887 struct rpc_message *msg,
888 struct nfs4_sequence_args *args,
889 struct nfs4_sequence_res *res,
890 int cache_reply)
891 {
892 nfs4_init_sequence(args, res, cache_reply);
893 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
894 }
895
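/*
 * Apply the change_info returned by a directory-modifying operation:
 * invalidate the directory's cached attributes and data, and force a
 * lookup revalidation unless the change was applied atomically.
 */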
896 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
897 {
898 struct nfs_inode *nfsi = NFS_I(dir);
899
900 spin_lock(&dir->i_lock);
901 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
902 if (!cinfo->atomic || cinfo->before != dir->i_version)
903 nfs_force_lookup_revalidate(dir);
904 dir->i_version = cinfo->after;
905 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
906 nfs_fscache_invalidate(dir);
907 spin_unlock(&dir->i_lock);
908 }
909
910 struct nfs4_opendata {
911 struct kref kref;
912 struct nfs_openargs o_arg;
913 struct nfs_openres o_res;
914 struct nfs_open_confirmargs c_arg;
915 struct nfs_open_confirmres c_res;
916 struct nfs4_string owner_name;
917 struct nfs4_string group_name;
918 struct nfs_fattr f_attr;
919 struct nfs4_label *f_label;
920 struct dentry *dir;
921 struct dentry *dentry;
922 struct nfs4_state_owner *owner;
923 struct nfs4_state *state;
924 struct iattr attrs;
925 unsigned long timestamp;
926 unsigned int rpc_done : 1;
927 unsigned int file_created : 1;
928 unsigned int is_recover : 1;
929 int rpc_status;
930 int cancelled;
931 };
932
933 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
934 int err, struct nfs4_exception *exception)
935 {
936 if (err != -EINVAL)
937 return false;
938 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
939 return false;
940 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
941 exception->retry = 1;
942 return true;
943 }
944
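/*
 * Map the open fmode to NFSv4 share_access bits; on servers with the
 * v4.1 atomic-open capability, also ask for no delegation when
 * O_DIRECT is used.
 */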
945 static u32
946 nfs4_map_atomic_open_share(struct nfs_server *server,
947 fmode_t fmode, int openflags)
948 {
949 u32 res = 0;
950
951 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
952 case FMODE_READ:
953 res = NFS4_SHARE_ACCESS_READ;
954 break;
955 case FMODE_WRITE:
956 res = NFS4_SHARE_ACCESS_WRITE;
957 break;
958 case FMODE_READ|FMODE_WRITE:
959 res = NFS4_SHARE_ACCESS_BOTH;
960 }
961 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
962 goto out;
963 /* Want no delegation if we're using O_DIRECT */
964 if (openflags & O_DIRECT)
965 res |= NFS4_SHARE_WANT_NO_DELEG;
966 out:
967 return res;
968 }
969
970 static enum open_claim_type4
971 nfs4_map_atomic_open_claim(struct nfs_server *server,
972 enum open_claim_type4 claim)
973 {
974 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
975 return claim;
976 switch (claim) {
977 default:
978 return claim;
979 case NFS4_OPEN_CLAIM_FH:
980 return NFS4_OPEN_CLAIM_NULL;
981 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
982 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
983 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
984 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
985 }
986 }
987
988 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
989 {
990 p->o_res.f_attr = &p->f_attr;
991 p->o_res.f_label = p->f_label;
992 p->o_res.seqid = p->o_arg.seqid;
993 p->c_res.seqid = p->c_arg.seqid;
994 p->o_res.server = p->o_arg.server;
995 p->o_res.access_request = p->o_arg.access;
996 nfs_fattr_init(&p->f_attr);
997 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
998 }
999
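/*
 * Allocate the nfs4_opendata that carries the OPEN arguments and
 * results, taking references on the dentry, its parent directory and
 * the state owner for the duration of the call.
 */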
1000 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1001 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1002 const struct iattr *attrs,
1003 struct nfs4_label *label,
1004 enum open_claim_type4 claim,
1005 gfp_t gfp_mask)
1006 {
1007 struct dentry *parent = dget_parent(dentry);
1008 struct inode *dir = parent->d_inode;
1009 struct nfs_server *server = NFS_SERVER(dir);
1010 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1011 struct nfs4_opendata *p;
1012
1013 p = kzalloc(sizeof(*p), gfp_mask);
1014 if (p == NULL)
1015 goto err;
1016
1017 p->f_label = nfs4_label_alloc(server, gfp_mask);
1018 if (IS_ERR(p->f_label))
1019 goto err_free_p;
1020
1021 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1022 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1023 if (IS_ERR(p->o_arg.seqid))
1024 goto err_free_label;
1025 nfs_sb_active(dentry->d_sb);
1026 p->dentry = dget(dentry);
1027 p->dir = parent;
1028 p->owner = sp;
1029 atomic_inc(&sp->so_count);
1030 p->o_arg.open_flags = flags;
1031 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1032 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1033 fmode, flags);
1034 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1035 * will return permission denied for all bits until close */
1036 if (!(flags & O_EXCL)) {
1037 /* ask server to check for all possible rights as results
1038 * are cached */
1039 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1040 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1041 }
1042 p->o_arg.clientid = server->nfs_client->cl_clientid;
1043 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1044 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1045 p->o_arg.name = &dentry->d_name;
1046 p->o_arg.server = server;
1047 p->o_arg.bitmask = nfs4_bitmask(server, label);
1048 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1049 p->o_arg.label = label;
1050 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1051 switch (p->o_arg.claim) {
1052 case NFS4_OPEN_CLAIM_NULL:
1053 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1054 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1055 p->o_arg.fh = NFS_FH(dir);
1056 break;
1057 case NFS4_OPEN_CLAIM_PREVIOUS:
1058 case NFS4_OPEN_CLAIM_FH:
1059 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1060 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1061 p->o_arg.fh = NFS_FH(dentry->d_inode);
1062 }
1063 if (attrs != NULL && attrs->ia_valid != 0) {
1064 __u32 verf[2];
1065
1066 p->o_arg.u.attrs = &p->attrs;
1067 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1068
1069 verf[0] = jiffies;
1070 verf[1] = current->pid;
1071 memcpy(p->o_arg.u.verifier.data, verf,
1072 sizeof(p->o_arg.u.verifier.data));
1073 }
1074 p->c_arg.fh = &p->o_res.fh;
1075 p->c_arg.stateid = &p->o_res.stateid;
1076 p->c_arg.seqid = p->o_arg.seqid;
1077 nfs4_init_opendata_res(p);
1078 kref_init(&p->kref);
1079 return p;
1080
1081 err_free_label:
1082 nfs4_label_free(p->f_label);
1083 err_free_p:
1084 kfree(p);
1085 err:
1086 dput(parent);
1087 return NULL;
1088 }
1089
1090 static void nfs4_opendata_free(struct kref *kref)
1091 {
1092 struct nfs4_opendata *p = container_of(kref,
1093 struct nfs4_opendata, kref);
1094 struct super_block *sb = p->dentry->d_sb;
1095
1096 nfs_free_seqid(p->o_arg.seqid);
1097 if (p->state != NULL)
1098 nfs4_put_open_state(p->state);
1099 nfs4_put_state_owner(p->owner);
1100
1101 nfs4_label_free(p->f_label);
1102
1103 dput(p->dir);
1104 dput(p->dentry);
1105 nfs_sb_deactive(sb);
1106 nfs_fattr_free_names(&p->f_attr);
1107 kfree(p->f_attr.mdsthreshold);
1108 kfree(p);
1109 }
1110
1111 static void nfs4_opendata_put(struct nfs4_opendata *p)
1112 {
1113 if (p != NULL)
1114 kref_put(&p->kref, nfs4_opendata_free);
1115 }
1116
1117 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1118 {
1119 int ret;
1120
1121 ret = rpc_wait_for_completion_task(task);
1122 return ret;
1123 }
1124
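/*
 * Return non-zero if an existing open stateid already covers the
 * requested mode, so the open can be satisfied without another OPEN
 * call (never the case for O_EXCL or O_TRUNC opens).
 */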
1125 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1126 {
1127 int ret = 0;
1128
1129 if (open_mode & (O_EXCL|O_TRUNC))
1130 goto out;
1131 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1132 case FMODE_READ:
1133 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1134 && state->n_rdonly != 0;
1135 break;
1136 case FMODE_WRITE:
1137 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1138 && state->n_wronly != 0;
1139 break;
1140 case FMODE_READ|FMODE_WRITE:
1141 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1142 && state->n_rdwr != 0;
1143 }
1144 out:
1145 return ret;
1146 }
1147
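/*
 * A delegation can satisfy the open only if it covers the requested
 * mode and is not currently being returned.
 */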
1148 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1149 {
1150 if (delegation == NULL)
1151 return 0;
1152 if ((delegation->type & fmode) != fmode)
1153 return 0;
1154 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1155 return 0;
1156 nfs_mark_delegation_referenced(delegation);
1157 return 1;
1158 }
1159
1160 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1161 {
1162 switch (fmode) {
1163 case FMODE_WRITE:
1164 state->n_wronly++;
1165 break;
1166 case FMODE_READ:
1167 state->n_rdonly++;
1168 break;
1169 case FMODE_READ|FMODE_WRITE:
1170 state->n_rdwr++;
1171 }
1172 nfs4_state_set_mode_locked(state, state->state | fmode);
1173 }
1174
1175 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1176 {
1177 struct nfs_client *clp = state->owner->so_server->nfs_client;
1178 bool need_recover = false;
1179
1180 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1181 need_recover = true;
1182 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1183 need_recover = true;
1184 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1185 need_recover = true;
1186 if (need_recover)
1187 nfs4_state_mark_reclaim_nograce(clp, state);
1188 }
1189
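/*
 * Decide whether the stateid returned by the server should replace the
 * open stateid cached in 'state'.
 */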
1190 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1191 nfs4_stateid *stateid)
1192 {
1193 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1194 return true;
1195 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1196 nfs_test_and_clear_all_open_stateid(state);
1197 return true;
1198 }
1199 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1200 return true;
1201 return false;
1202 }
1203
1204 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1205 {
1206 if (state->n_wronly)
1207 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1208 if (state->n_rdonly)
1209 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1210 if (state->n_rdwr)
1211 set_bit(NFS_O_RDWR_STATE, &state->flags);
1212 }
1213
1214 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1215 nfs4_stateid *stateid, fmode_t fmode)
1216 {
1217 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1218 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1219 case FMODE_WRITE:
1220 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1221 break;
1222 case FMODE_READ:
1223 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1224 break;
1225 case 0:
1226 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1227 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1228 clear_bit(NFS_OPEN_STATE, &state->flags);
1229 }
1230 if (stateid == NULL)
1231 return;
1232 /* Handle races with OPEN */
1233 if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
1234 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1235 nfs_resync_open_stateid_locked(state);
1236 return;
1237 }
1238 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1239 nfs4_stateid_copy(&state->stateid, stateid);
1240 nfs4_stateid_copy(&state->open_stateid, stateid);
1241 }
1242
1243 static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1244 {
1245 write_seqlock(&state->seqlock);
1246 nfs_clear_open_stateid_locked(state, stateid, fmode);
1247 write_sequnlock(&state->seqlock);
1248 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1249 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1250 }
1251
1252 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1253 {
1254 switch (fmode) {
1255 case FMODE_READ:
1256 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1257 break;
1258 case FMODE_WRITE:
1259 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1260 break;
1261 case FMODE_READ|FMODE_WRITE:
1262 set_bit(NFS_O_RDWR_STATE, &state->flags);
1263 }
1264 if (!nfs_need_update_open_stateid(state, stateid))
1265 return;
1266 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1267 nfs4_stateid_copy(&state->stateid, stateid);
1268 nfs4_stateid_copy(&state->open_stateid, stateid);
1269 }
1270
1271 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1272 {
1273 /*
1274 * Protect the call to nfs4_state_set_mode_locked and
1275 * serialise the stateid update
1276 */
1277 write_seqlock(&state->seqlock);
1278 if (deleg_stateid != NULL) {
1279 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1280 set_bit(NFS_DELEGATED_STATE, &state->flags);
1281 }
1282 if (open_stateid != NULL)
1283 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1284 write_sequnlock(&state->seqlock);
1285 spin_lock(&state->owner->so_lock);
1286 update_open_stateflags(state, fmode);
1287 spin_unlock(&state->owner->so_lock);
1288 }
1289
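/*
 * Record the new open and/or delegation stateid in 'state'. Returns 1
 * if a stateid was applied, 0 otherwise.
 */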
1290 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1291 {
1292 struct nfs_inode *nfsi = NFS_I(state->inode);
1293 struct nfs_delegation *deleg_cur;
1294 int ret = 0;
1295
1296 fmode &= (FMODE_READ|FMODE_WRITE);
1297
1298 rcu_read_lock();
1299 deleg_cur = rcu_dereference(nfsi->delegation);
1300 if (deleg_cur == NULL)
1301 goto no_delegation;
1302
1303 spin_lock(&deleg_cur->lock);
1304 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1305 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1306 (deleg_cur->type & fmode) != fmode)
1307 goto no_delegation_unlock;
1308
1309 if (delegation == NULL)
1310 delegation = &deleg_cur->stateid;
1311 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1312 goto no_delegation_unlock;
1313
1314 nfs_mark_delegation_referenced(deleg_cur);
1315 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1316 ret = 1;
1317 no_delegation_unlock:
1318 spin_unlock(&deleg_cur->lock);
1319 no_delegation:
1320 rcu_read_unlock();
1321
1322 if (!ret && open_stateid != NULL) {
1323 __update_open_stateid(state, open_stateid, NULL, fmode);
1324 ret = 1;
1325 }
1326 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1327 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1328
1329 return ret;
1330 }
1331
1332 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1333 const nfs4_stateid *stateid)
1334 {
1335 struct nfs4_state *state = lsp->ls_state;
1336 bool ret = false;
1337
1338 spin_lock(&state->state_lock);
1339 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1340 goto out_noupdate;
1341 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1342 goto out_noupdate;
1343 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1344 ret = true;
1345 out_noupdate:
1346 spin_unlock(&state->state_lock);
1347 return ret;
1348 }
1349
1350 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1351 {
1352 struct nfs_delegation *delegation;
1353
1354 rcu_read_lock();
1355 delegation = rcu_dereference(NFS_I(inode)->delegation);
1356 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1357 rcu_read_unlock();
1358 return;
1359 }
1360 rcu_read_unlock();
1361 nfs4_inode_return_delegation(inode);
1362 }
1363
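/*
 * Try to satisfy the open using existing open state or a cached
 * delegation, avoiding an OPEN RPC; returns ERR_PTR(-EAGAIN) if a full
 * OPEN is required.
 */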
1364 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1365 {
1366 struct nfs4_state *state = opendata->state;
1367 struct nfs_inode *nfsi = NFS_I(state->inode);
1368 struct nfs_delegation *delegation;
1369 int open_mode = opendata->o_arg.open_flags;
1370 fmode_t fmode = opendata->o_arg.fmode;
1371 nfs4_stateid stateid;
1372 int ret = -EAGAIN;
1373
1374 for (;;) {
1375 spin_lock(&state->owner->so_lock);
1376 if (can_open_cached(state, fmode, open_mode)) {
1377 update_open_stateflags(state, fmode);
1378 spin_unlock(&state->owner->so_lock);
1379 goto out_return_state;
1380 }
1381 spin_unlock(&state->owner->so_lock);
1382 rcu_read_lock();
1383 delegation = rcu_dereference(nfsi->delegation);
1384 if (!can_open_delegated(delegation, fmode)) {
1385 rcu_read_unlock();
1386 break;
1387 }
1388 /* Save the delegation */
1389 nfs4_stateid_copy(&stateid, &delegation->stateid);
1390 rcu_read_unlock();
1391 nfs_release_seqid(opendata->o_arg.seqid);
1392 if (!opendata->is_recover) {
1393 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1394 if (ret != 0)
1395 goto out;
1396 }
1397 ret = -EAGAIN;
1398
1399 /* Try to update the stateid using the delegation */
1400 if (update_open_stateid(state, NULL, &stateid, fmode))
1401 goto out_return_state;
1402 }
1403 out:
1404 return ERR_PTR(ret);
1405 out_return_state:
1406 atomic_inc(&state->count);
1407 return state;
1408 }
1409
1410 static void
1411 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1412 {
1413 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1414 struct nfs_delegation *delegation;
1415 int delegation_flags = 0;
1416
1417 rcu_read_lock();
1418 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1419 if (delegation)
1420 delegation_flags = delegation->flags;
1421 rcu_read_unlock();
1422 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1423 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1424 "returning a delegation for "
1425 "OPEN(CLAIM_DELEGATE_CUR)\n",
1426 clp->cl_hostname);
1427 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1428 nfs_inode_set_delegation(state->inode,
1429 data->owner->so_cred,
1430 &data->o_res);
1431 else
1432 nfs_inode_reclaim_delegation(state->inode,
1433 data->owner->so_cred,
1434 &data->o_res);
1435 }
1436
1437 /*
1438 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1439 * and update the nfs4_state.
1440 */
1441 static struct nfs4_state *
1442 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1443 {
1444 struct inode *inode = data->state->inode;
1445 struct nfs4_state *state = data->state;
1446 int ret;
1447
1448 if (!data->rpc_done) {
1449 if (data->rpc_status) {
1450 ret = data->rpc_status;
1451 goto err;
1452 }
1453 /* cached opens have already been processed */
1454 goto update;
1455 }
1456
1457 ret = nfs_refresh_inode(inode, &data->f_attr);
1458 if (ret)
1459 goto err;
1460
1461 if (data->o_res.delegation_type != 0)
1462 nfs4_opendata_check_deleg(data, state);
1463 update:
1464 update_open_stateid(state, &data->o_res.stateid, NULL,
1465 data->o_arg.fmode);
1466 atomic_inc(&state->count);
1467
1468 return state;
1469 err:
1470 return ERR_PTR(ret);
1471
1472 }
1473
1474 static struct nfs4_state *
1475 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1476 {
1477 struct inode *inode;
1478 struct nfs4_state *state = NULL;
1479 int ret;
1480
1481 if (!data->rpc_done) {
1482 state = nfs4_try_open_cached(data);
1483 goto out;
1484 }
1485
1486 ret = -EAGAIN;
1487 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1488 goto err;
1489 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1490 ret = PTR_ERR(inode);
1491 if (IS_ERR(inode))
1492 goto err;
1493 ret = -ENOMEM;
1494 state = nfs4_get_open_state(inode, data->owner);
1495 if (state == NULL)
1496 goto err_put_inode;
1497 if (data->o_res.delegation_type != 0)
1498 nfs4_opendata_check_deleg(data, state);
1499 update_open_stateid(state, &data->o_res.stateid, NULL,
1500 data->o_arg.fmode);
1501 iput(inode);
1502 out:
1503 nfs_release_seqid(data->o_arg.seqid);
1504 return state;
1505 err_put_inode:
1506 iput(inode);
1507 err:
1508 return ERR_PTR(ret);
1509 }
1510
1511 static struct nfs4_state *
1512 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1513 {
1514 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1515 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1516 return _nfs4_opendata_to_nfs4_state(data);
1517 }
1518
1519 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1520 {
1521 struct nfs_inode *nfsi = NFS_I(state->inode);
1522 struct nfs_open_context *ctx;
1523
1524 spin_lock(&state->inode->i_lock);
1525 list_for_each_entry(ctx, &nfsi->open_files, list) {
1526 if (ctx->state != state)
1527 continue;
1528 get_nfs_open_context(ctx);
1529 spin_unlock(&state->inode->i_lock);
1530 return ctx;
1531 }
1532 spin_unlock(&state->inode->i_lock);
1533 return ERR_PTR(-ENOENT);
1534 }
1535
1536 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1537 struct nfs4_state *state, enum open_claim_type4 claim)
1538 {
1539 struct nfs4_opendata *opendata;
1540
1541 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1542 NULL, NULL, claim, GFP_NOFS);
1543 if (opendata == NULL)
1544 return ERR_PTR(-ENOMEM);
1545 opendata->state = state;
1546 atomic_inc(&state->count);
1547 return opendata;
1548 }
1549
1550 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1551 {
1552 struct nfs4_state *newstate;
1553 int ret;
1554
1555 opendata->o_arg.open_flags = 0;
1556 opendata->o_arg.fmode = fmode;
1557 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1558 NFS_SB(opendata->dentry->d_sb),
1559 fmode, 0);
1560 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1561 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1562 nfs4_init_opendata_res(opendata);
1563 ret = _nfs4_recover_proc_open(opendata);
1564 if (ret != 0)
1565 return ret;
1566 newstate = nfs4_opendata_to_nfs4_state(opendata);
1567 if (IS_ERR(newstate))
1568 return PTR_ERR(newstate);
1569 nfs4_close_state(newstate, fmode);
1570 *res = newstate;
1571 return 0;
1572 }
1573
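/*
 * Recover open state that has been lost on the server: redo an OPEN
 * for each share mode that still has users, then resynchronise the
 * current stateid with the open stateid.
 */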
1574 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1575 {
1576 struct nfs4_state *newstate;
1577 int ret;
1578
1579 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1580 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1581 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1582 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1583 /* memory barrier prior to reading state->n_* */
1584 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1585 clear_bit(NFS_OPEN_STATE, &state->flags);
1586 smp_rmb();
1587 if (state->n_rdwr != 0) {
1588 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1589 if (ret != 0)
1590 return ret;
1591 if (newstate != state)
1592 return -ESTALE;
1593 }
1594 if (state->n_wronly != 0) {
1595 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1596 if (ret != 0)
1597 return ret;
1598 if (newstate != state)
1599 return -ESTALE;
1600 }
1601 if (state->n_rdonly != 0) {
1602 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1603 if (ret != 0)
1604 return ret;
1605 if (newstate != state)
1606 return -ESTALE;
1607 }
1608 /*
1609 * We may have performed cached opens for all three recoveries.
1610 * Check if we need to update the current stateid.
1611 */
1612 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1613 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1614 write_seqlock(&state->seqlock);
1615 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1616 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1617 write_sequnlock(&state->seqlock);
1618 }
1619 return 0;
1620 }
1621
1622 /*
1623 * OPEN_RECLAIM:
1624 * reclaim state on the server after a reboot.
1625 */
1626 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1627 {
1628 struct nfs_delegation *delegation;
1629 struct nfs4_opendata *opendata;
1630 fmode_t delegation_type = 0;
1631 int status;
1632
1633 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1634 NFS4_OPEN_CLAIM_PREVIOUS);
1635 if (IS_ERR(opendata))
1636 return PTR_ERR(opendata);
1637 rcu_read_lock();
1638 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1639 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1640 delegation_type = delegation->type;
1641 rcu_read_unlock();
1642 opendata->o_arg.u.delegation_type = delegation_type;
1643 status = nfs4_open_recover(opendata, state);
1644 nfs4_opendata_put(opendata);
1645 return status;
1646 }
1647
1648 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1649 {
1650 struct nfs_server *server = NFS_SERVER(state->inode);
1651 struct nfs4_exception exception = { };
1652 int err;
1653 do {
1654 err = _nfs4_do_open_reclaim(ctx, state);
1655 trace_nfs4_open_reclaim(ctx, 0, err);
1656 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1657 continue;
1658 if (err != -NFS4ERR_DELAY)
1659 break;
1660 nfs4_handle_exception(server, err, &exception);
1661 } while (exception.retry);
1662 return err;
1663 }
1664
1665 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1666 {
1667 struct nfs_open_context *ctx;
1668 int ret;
1669
1670 ctx = nfs4_state_find_open_context(state);
1671 if (IS_ERR(ctx))
1672 return -EAGAIN;
1673 ret = nfs4_do_open_reclaim(ctx, state);
1674 put_nfs_open_context(ctx);
1675 return ret;
1676 }
1677
1678 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1679 {
1680 switch (err) {
1681 default:
1682 printk(KERN_ERR "NFS: %s: unhandled error "
1683 "%d.\n", __func__, err);
1684 case 0:
1685 case -ENOENT:
1686 case -ESTALE:
1687 break;
1688 case -NFS4ERR_BADSESSION:
1689 case -NFS4ERR_BADSLOT:
1690 case -NFS4ERR_BAD_HIGH_SLOT:
1691 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1692 case -NFS4ERR_DEADSESSION:
1693 set_bit(NFS_DELEGATED_STATE, &state->flags);
1694 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1695 return -EAGAIN;
1696 case -NFS4ERR_STALE_CLIENTID:
1697 case -NFS4ERR_STALE_STATEID:
1698 set_bit(NFS_DELEGATED_STATE, &state->flags);
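/* Fall through */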
1699 case -NFS4ERR_EXPIRED:
1700 /* Don't recall a delegation if it was lost */
1701 nfs4_schedule_lease_recovery(server->nfs_client);
1702 return -EAGAIN;
1703 case -NFS4ERR_MOVED:
1704 nfs4_schedule_migration_recovery(server);
1705 return -EAGAIN;
1706 case -NFS4ERR_LEASE_MOVED:
1707 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1708 return -EAGAIN;
1709 case -NFS4ERR_DELEG_REVOKED:
1710 case -NFS4ERR_ADMIN_REVOKED:
1711 case -NFS4ERR_BAD_STATEID:
1712 case -NFS4ERR_OPENMODE:
1713 nfs_inode_find_state_and_recover(state->inode,
1714 stateid);
1715 nfs4_schedule_stateid_recovery(server, state);
1716 return -EAGAIN;
1717 case -NFS4ERR_DELAY:
1718 case -NFS4ERR_GRACE:
1719 set_bit(NFS_DELEGATED_STATE, &state->flags);
1720 ssleep(1);
1721 return -EAGAIN;
1722 case -ENOMEM:
1723 case -NFS4ERR_DENIED:
1724 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1725 return 0;
1726 }
1727 return err;
1728 }
1729
1730 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1731 {
1732 struct nfs_server *server = NFS_SERVER(state->inode);
1733 struct nfs4_opendata *opendata;
1734 int err;
1735
1736 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1737 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1738 if (IS_ERR(opendata))
1739 return PTR_ERR(opendata);
1740 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1741 err = nfs4_open_recover(opendata, state);
1742 nfs4_opendata_put(opendata);
1743 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1744 }
1745
1746 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1747 {
1748 struct nfs4_opendata *data = calldata;
1749
1750 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1751 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1752 }
1753
1754 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1755 {
1756 struct nfs4_opendata *data = calldata;
1757
1758 nfs40_sequence_done(task, &data->c_res.seq_res);
1759
1760 data->rpc_status = task->tk_status;
1761 if (data->rpc_status == 0) {
1762 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1763 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1764 renew_lease(data->o_res.server, data->timestamp);
1765 data->rpc_done = 1;
1766 }
1767 }
1768
1769 static void nfs4_open_confirm_release(void *calldata)
1770 {
1771 struct nfs4_opendata *data = calldata;
1772 struct nfs4_state *state = NULL;
1773
1774 /* If this request hasn't been cancelled, do nothing */
1775 if (data->cancelled == 0)
1776 goto out_free;
1777 /* In case of error, no cleanup! */
1778 if (!data->rpc_done)
1779 goto out_free;
1780 state = nfs4_opendata_to_nfs4_state(data);
1781 if (!IS_ERR(state))
1782 nfs4_close_state(state, data->o_arg.fmode);
1783 out_free:
1784 nfs4_opendata_put(data);
1785 }
1786
1787 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1788 .rpc_call_prepare = nfs4_open_confirm_prepare,
1789 .rpc_call_done = nfs4_open_confirm_done,
1790 .rpc_release = nfs4_open_confirm_release,
1791 };
1792
1793 /*
1794 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1795 */
1796 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1797 {
1798 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1799 struct rpc_task *task;
1800 struct rpc_message msg = {
1801 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1802 .rpc_argp = &data->c_arg,
1803 .rpc_resp = &data->c_res,
1804 .rpc_cred = data->owner->so_cred,
1805 };
1806 struct rpc_task_setup task_setup_data = {
1807 .rpc_client = server->client,
1808 .rpc_message = &msg,
1809 .callback_ops = &nfs4_open_confirm_ops,
1810 .callback_data = data,
1811 .workqueue = nfsiod_workqueue,
1812 .flags = RPC_TASK_ASYNC,
1813 };
1814 int status;
1815
1816 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1817 kref_get(&data->kref);
1818 data->rpc_done = 0;
1819 data->rpc_status = 0;
1820 data->timestamp = jiffies;
1821 task = rpc_run_task(&task_setup_data);
1822 if (IS_ERR(task))
1823 return PTR_ERR(task);
1824 status = nfs4_wait_for_completion_rpc_task(task);
1825 if (status != 0) {
1826 data->cancelled = 1;
1827 smp_wmb();
1828 } else
1829 status = data->rpc_status;
1830 rpc_put_task(task);
1831 return status;
1832 }
1833
1834 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1835 {
1836 struct nfs4_opendata *data = calldata;
1837 struct nfs4_state_owner *sp = data->owner;
1838 struct nfs_client *clp = sp->so_server->nfs_client;
1839
1840 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1841 goto out_wait;
1842 /*
1843 * Check if we still need to send an OPEN call, or if we can use
1844 * a delegation instead.
1845 */
1846 if (data->state != NULL) {
1847 struct nfs_delegation *delegation;
1848
1849 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1850 goto out_no_action;
1851 rcu_read_lock();
1852 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1853 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1854 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
1855 can_open_delegated(delegation, data->o_arg.fmode))
1856 goto unlock_no_action;
1857 rcu_read_unlock();
1858 }
1859 /* Update client id. */
1860 data->o_arg.clientid = clp->cl_clientid;
1861 switch (data->o_arg.claim) {
1862 case NFS4_OPEN_CLAIM_PREVIOUS:
1863 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1864 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1865 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
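/* Fall through */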
1866 case NFS4_OPEN_CLAIM_FH:
1867 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1868 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1869 }
1870 data->timestamp = jiffies;
1871 if (nfs4_setup_sequence(data->o_arg.server,
1872 &data->o_arg.seq_args,
1873 &data->o_res.seq_res,
1874 task) != 0)
1875 nfs_release_seqid(data->o_arg.seqid);
1876
1877 /* Set the create mode (note dependency on the session type) */
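/* A persistent session gives exactly-once semantics, so GUARDED is
 * sufficient there; other NFSv4.1+ servers can use EXCLUSIVE4_1, which
 * carries the attributes with the create instead of encoding the verifier
 * in the atime/mtime fields (see nfs4_exclusive_attrset()). */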
1878 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1879 if (data->o_arg.open_flags & O_EXCL) {
1880 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1881 if (nfs4_has_persistent_session(clp))
1882 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1883 else if (clp->cl_mvops->minor_version > 0)
1884 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1885 }
1886 return;
1887 unlock_no_action:
1888 rcu_read_unlock();
1889 out_no_action:
1890 task->tk_action = NULL;
1891 out_wait:
1892 nfs4_sequence_done(task, &data->o_res.seq_res);
1893 }
1894
1895 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1896 {
1897 struct nfs4_opendata *data = calldata;
1898
1899 data->rpc_status = task->tk_status;
1900
1901 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1902 return;
1903
1904 if (task->tk_status == 0) {
1905 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1906 switch (data->o_res.f_attr->mode & S_IFMT) {
1907 case S_IFREG:
1908 break;
1909 case S_IFLNK:
1910 data->rpc_status = -ELOOP;
1911 break;
1912 case S_IFDIR:
1913 data->rpc_status = -EISDIR;
1914 break;
1915 default:
1916 data->rpc_status = -ENOTDIR;
1917 }
1918 }
1919 renew_lease(data->o_res.server, data->timestamp);
1920 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1921 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1922 }
1923 data->rpc_done = 1;
1924 }
1925
1926 static void nfs4_open_release(void *calldata)
1927 {
1928 struct nfs4_opendata *data = calldata;
1929 struct nfs4_state *state = NULL;
1930
1931 /* If this request hasn't been cancelled, do nothing */
1932 if (data->cancelled == 0)
1933 goto out_free;
1934 /* In case of error, no cleanup! */
1935 if (data->rpc_status != 0 || !data->rpc_done)
1936 goto out_free;
1937 /* In case we need an open_confirm, no cleanup! */
1938 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1939 goto out_free;
1940 state = nfs4_opendata_to_nfs4_state(data);
1941 if (!IS_ERR(state))
1942 nfs4_close_state(state, data->o_arg.fmode);
1943 out_free:
1944 nfs4_opendata_put(data);
1945 }
1946
1947 static const struct rpc_call_ops nfs4_open_ops = {
1948 .rpc_call_prepare = nfs4_open_prepare,
1949 .rpc_call_done = nfs4_open_done,
1950 .rpc_release = nfs4_open_release,
1951 };
1952
1953 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1954 {
1955 struct inode *dir = data->dir->d_inode;
1956 struct nfs_server *server = NFS_SERVER(dir);
1957 struct nfs_openargs *o_arg = &data->o_arg;
1958 struct nfs_openres *o_res = &data->o_res;
1959 struct rpc_task *task;
1960 struct rpc_message msg = {
1961 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1962 .rpc_argp = o_arg,
1963 .rpc_resp = o_res,
1964 .rpc_cred = data->owner->so_cred,
1965 };
1966 struct rpc_task_setup task_setup_data = {
1967 .rpc_client = server->client,
1968 .rpc_message = &msg,
1969 .callback_ops = &nfs4_open_ops,
1970 .callback_data = data,
1971 .workqueue = nfsiod_workqueue,
1972 .flags = RPC_TASK_ASYNC,
1973 };
1974 int status;
1975
1976 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1977 kref_get(&data->kref);
1978 data->rpc_done = 0;
1979 data->rpc_status = 0;
1980 data->cancelled = 0;
1981 data->is_recover = 0;
1982 if (isrecover) {
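/* Recovery opens are marked privileged so they are not blocked
 * while the session is being drained for state recovery */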
1983 nfs4_set_sequence_privileged(&o_arg->seq_args);
1984 data->is_recover = 1;
1985 }
1986 task = rpc_run_task(&task_setup_data);
1987 if (IS_ERR(task))
1988 return PTR_ERR(task);
1989 status = nfs4_wait_for_completion_rpc_task(task);
1990 if (status != 0) {
1991 data->cancelled = 1;
1992 smp_wmb();
1993 } else
1994 status = data->rpc_status;
1995 rpc_put_task(task);
1996
1997 return status;
1998 }
1999
2000 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2001 {
2002 struct inode *dir = data->dir->d_inode;
2003 struct nfs_openres *o_res = &data->o_res;
2004 int status;
2005
2006 status = nfs4_run_open_task(data, 1);
2007 if (status != 0 || !data->rpc_done)
2008 return status;
2009
2010 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2011
2012 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2013 status = _nfs4_proc_open_confirm(data);
2014 if (status != 0)
2015 return status;
2016 }
2017
2018 return status;
2019 }
2020
2021 /*
2022 * Additional permission checks in order to distinguish between an
2023 * open for read, and an open for execute. This works around the
2024 * fact that NFSv4 OPEN treats read and execute permissions as being
2025 * the same.
2026 * Note that in the non-execute case, we want to turn off permission
2027 * checking if we just created a new file (POSIX open() semantics).
2028 */
2029 static int nfs4_opendata_access(struct rpc_cred *cred,
2030 struct nfs4_opendata *opendata,
2031 struct nfs4_state *state, fmode_t fmode,
2032 int openflags)
2033 {
2034 struct nfs_access_entry cache;
2035 u32 mask;
2036
2037 /* access call failed or for some reason the server doesn't
2038 * support any access modes -- defer access call until later */
2039 if (opendata->o_res.access_supported == 0)
2040 return 0;
2041
2042 mask = 0;
2043 /*
2044 * Use openflags to check for exec, because fmode won't
2045 	 * always have FMODE_EXEC set when the file is opened for exec.

2046 */
2047 if (openflags & __FMODE_EXEC) {
2048 /* ONLY check for exec rights */
2049 mask = MAY_EXEC;
2050 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2051 mask = MAY_READ;
2052
2053 cache.cred = cred;
2054 cache.jiffies = jiffies;
2055 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2056 nfs_access_add_cache(state->inode, &cache);
2057
2058 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2059 return 0;
2060
2061 /* even though OPEN succeeded, access is denied. Close the file */
2062 nfs4_close_state(state, fmode);
2063 return -EACCES;
2064 }
2065
2066 /*
2067 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2068 */
2069 static int _nfs4_proc_open(struct nfs4_opendata *data)
2070 {
2071 struct inode *dir = data->dir->d_inode;
2072 struct nfs_server *server = NFS_SERVER(dir);
2073 struct nfs_openargs *o_arg = &data->o_arg;
2074 struct nfs_openres *o_res = &data->o_res;
2075 int status;
2076
2077 status = nfs4_run_open_task(data, 0);
2078 if (!data->rpc_done)
2079 return status;
2080 if (status != 0) {
2081 if (status == -NFS4ERR_BADNAME &&
2082 !(o_arg->open_flags & O_CREAT))
2083 return -ENOENT;
2084 return status;
2085 }
2086
2087 nfs_fattr_map_and_free_names(server, &data->f_attr);
2088
2089 if (o_arg->open_flags & O_CREAT) {
2090 update_changeattr(dir, &o_res->cinfo);
2091 if (o_arg->open_flags & O_EXCL)
2092 data->file_created = 1;
2093 else if (o_res->cinfo.before != o_res->cinfo.after)
2094 data->file_created = 1;
2095 }
2096 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2097 server->caps &= ~NFS_CAP_POSIX_LOCK;
2098 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2099 status = _nfs4_proc_open_confirm(data);
2100 if (status != 0)
2101 return status;
2102 }
2103 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2104 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2105 return 0;
2106 }
2107
2108 static int nfs4_recover_expired_lease(struct nfs_server *server)
2109 {
2110 return nfs4_client_recover_expired_lease(server->nfs_client);
2111 }
2112
2113 /*
2114 * OPEN_EXPIRED:
2115 * reclaim state on the server after a network partition.
2116 * Assumes caller holds the appropriate lock
2117 */
2118 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2119 {
2120 struct nfs4_opendata *opendata;
2121 int ret;
2122
2123 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2124 NFS4_OPEN_CLAIM_FH);
2125 if (IS_ERR(opendata))
2126 return PTR_ERR(opendata);
2127 ret = nfs4_open_recover(opendata, state);
2128 if (ret == -ESTALE)
2129 d_drop(ctx->dentry);
2130 nfs4_opendata_put(opendata);
2131 return ret;
2132 }
2133
2134 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2135 {
2136 struct nfs_server *server = NFS_SERVER(state->inode);
2137 struct nfs4_exception exception = { };
2138 int err;
2139
2140 do {
2141 err = _nfs4_open_expired(ctx, state);
2142 trace_nfs4_open_expired(ctx, 0, err);
2143 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2144 continue;
2145 switch (err) {
2146 default:
2147 goto out;
2148 case -NFS4ERR_GRACE:
2149 case -NFS4ERR_DELAY:
2150 nfs4_handle_exception(server, err, &exception);
2151 err = 0;
2152 }
2153 } while (exception.retry);
2154 out:
2155 return err;
2156 }
2157
2158 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2159 {
2160 struct nfs_open_context *ctx;
2161 int ret;
2162
2163 ctx = nfs4_state_find_open_context(state);
2164 if (IS_ERR(ctx))
2165 return -EAGAIN;
2166 ret = nfs4_do_open_expired(ctx, state);
2167 put_nfs_open_context(ctx);
2168 return ret;
2169 }
2170
2171 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2172 {
2173 nfs_remove_bad_delegation(state->inode);
2174 write_seqlock(&state->seqlock);
2175 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2176 write_sequnlock(&state->seqlock);
2177 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2178 }
2179
2180 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2181 {
2182 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2183 nfs_finish_clear_delegation_stateid(state);
2184 }
2185
2186 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2187 {
2188 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2189 nfs40_clear_delegation_stateid(state);
2190 return nfs4_open_expired(sp, state);
2191 }
2192
2193 #if defined(CONFIG_NFS_V4_1)
2194 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2195 {
2196 struct nfs_server *server = NFS_SERVER(state->inode);
2197 nfs4_stateid stateid;
2198 struct nfs_delegation *delegation;
2199 struct rpc_cred *cred;
2200 int status;
2201
2202 /* Get the delegation credential for use by test/free_stateid */
2203 rcu_read_lock();
2204 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2205 if (delegation == NULL) {
2206 rcu_read_unlock();
2207 return;
2208 }
2209
2210 nfs4_stateid_copy(&stateid, &delegation->stateid);
2211 cred = get_rpccred(delegation->cred);
2212 rcu_read_unlock();
2213 status = nfs41_test_stateid(server, &stateid, cred);
2214 trace_nfs4_test_delegation_stateid(state, NULL, status);
2215
2216 if (status != NFS_OK) {
2217 /* Free the stateid unless the server explicitly
2218 * informs us the stateid is unrecognized. */
2219 if (status != -NFS4ERR_BAD_STATEID)
2220 nfs41_free_stateid(server, &stateid, cred);
2221 nfs_finish_clear_delegation_stateid(state);
2222 }
2223
2224 put_rpccred(cred);
2225 }
2226
2227 /**
2228 * nfs41_check_open_stateid - possibly free an open stateid
2229 *
2230 * @state: NFSv4 state for an inode
2231 *
2232 * Returns NFS_OK if recovery for this stateid is now finished.
2233 * Otherwise a negative NFS4ERR value is returned.
2234 */
2235 static int nfs41_check_open_stateid(struct nfs4_state *state)
2236 {
2237 struct nfs_server *server = NFS_SERVER(state->inode);
2238 nfs4_stateid *stateid = &state->open_stateid;
2239 struct rpc_cred *cred = state->owner->so_cred;
2240 int status;
2241
2242 /* If a state reset has been done, test_stateid is unneeded */
2243 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2244 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2245 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2246 return -NFS4ERR_BAD_STATEID;
2247
2248 status = nfs41_test_stateid(server, stateid, cred);
2249 trace_nfs4_test_open_stateid(state, NULL, status);
2250 if (status != NFS_OK) {
2251 /* Free the stateid unless the server explicitly
2252 * informs us the stateid is unrecognized. */
2253 if (status != -NFS4ERR_BAD_STATEID)
2254 nfs41_free_stateid(server, stateid, cred);
2255
2256 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2257 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2258 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2259 clear_bit(NFS_OPEN_STATE, &state->flags);
2260 }
2261 return status;
2262 }
2263
2264 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2265 {
2266 int status;
2267
2268 nfs41_check_delegation_stateid(state);
2269 status = nfs41_check_open_stateid(state);
2270 if (status != NFS_OK)
2271 status = nfs4_open_expired(sp, state);
2272 return status;
2273 }
2274 #endif
2275
2276 /*
2277 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2278 * fields corresponding to attributes that were used to store the verifier.
2279 * Make sure we clobber those fields in the later setattr call
2280 */
2281 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
2282 {
2283 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2284 !(sattr->ia_valid & ATTR_ATIME_SET))
2285 sattr->ia_valid |= ATTR_ATIME;
2286
2287 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2288 !(sattr->ia_valid & ATTR_MTIME_SET))
2289 sattr->ia_valid |= ATTR_MTIME;
2290 }
2291
2292 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2293 fmode_t fmode,
2294 int flags,
2295 struct nfs_open_context *ctx)
2296 {
2297 struct nfs4_state_owner *sp = opendata->owner;
2298 struct nfs_server *server = sp->so_server;
2299 struct dentry *dentry;
2300 struct nfs4_state *state;
2301 unsigned int seq;
2302 int ret;
2303
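/* Sample so_reclaim_seqcount so we can detect open state recovery running
 * concurrently with this OPEN; it is re-checked below before the open
 * context is attached. */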
2304 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2305
2306 ret = _nfs4_proc_open(opendata);
2307 if (ret != 0)
2308 goto out;
2309
2310 state = nfs4_opendata_to_nfs4_state(opendata);
2311 ret = PTR_ERR(state);
2312 if (IS_ERR(state))
2313 goto out;
2314 if (server->caps & NFS_CAP_POSIX_LOCK)
2315 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2316
2317 dentry = opendata->dentry;
2318 if (dentry->d_inode == NULL) {
2319 /* FIXME: Is this d_drop() ever needed? */
2320 d_drop(dentry);
2321 dentry = d_add_unique(dentry, igrab(state->inode));
2322 if (dentry == NULL) {
2323 dentry = opendata->dentry;
2324 } else if (dentry != ctx->dentry) {
2325 dput(ctx->dentry);
2326 ctx->dentry = dget(dentry);
2327 }
2328 nfs_set_verifier(dentry,
2329 nfs_save_change_attribute(opendata->dir->d_inode));
2330 }
2331
2332 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2333 if (ret != 0)
2334 goto out;
2335
2336 ctx->state = state;
2337 if (dentry->d_inode == state->inode) {
2338 nfs_inode_attach_open_context(ctx);
2339 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2340 nfs4_schedule_stateid_recovery(server, state);
2341 }
2342 out:
2343 return ret;
2344 }
2345
2346 /*
2347 * Returns a referenced nfs4_state
2348 */
2349 static int _nfs4_do_open(struct inode *dir,
2350 struct nfs_open_context *ctx,
2351 int flags,
2352 struct iattr *sattr,
2353 struct nfs4_label *label,
2354 int *opened)
2355 {
2356 struct nfs4_state_owner *sp;
2357 struct nfs4_state *state = NULL;
2358 struct nfs_server *server = NFS_SERVER(dir);
2359 struct nfs4_opendata *opendata;
2360 struct dentry *dentry = ctx->dentry;
2361 struct rpc_cred *cred = ctx->cred;
2362 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2363 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2364 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2365 struct nfs4_label *olabel = NULL;
2366 int status;
2367
2368 /* Protect against reboot recovery conflicts */
2369 status = -ENOMEM;
2370 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2371 if (sp == NULL) {
2372 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2373 goto out_err;
2374 }
2375 status = nfs4_recover_expired_lease(server);
2376 if (status != 0)
2377 goto err_put_state_owner;
2378 if (dentry->d_inode != NULL)
2379 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
2380 status = -ENOMEM;
2381 if (dentry->d_inode)
2382 claim = NFS4_OPEN_CLAIM_FH;
2383 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2384 label, claim, GFP_KERNEL);
2385 if (opendata == NULL)
2386 goto err_put_state_owner;
2387
2388 if (label) {
2389 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2390 if (IS_ERR(olabel)) {
2391 status = PTR_ERR(olabel);
2392 goto err_opendata_put;
2393 }
2394 }
2395
2396 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2397 if (!opendata->f_attr.mdsthreshold) {
2398 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2399 if (!opendata->f_attr.mdsthreshold)
2400 goto err_free_label;
2401 }
2402 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2403 }
2404 if (dentry->d_inode != NULL)
2405 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
2406
2407 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2408 if (status != 0)
2409 goto err_free_label;
2410 state = ctx->state;
2411
2412 if ((opendata->o_arg.open_flags & O_EXCL) &&
2413 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2414 nfs4_exclusive_attrset(opendata, sattr);
2415
2416 nfs_fattr_init(opendata->o_res.f_attr);
2417 status = nfs4_do_setattr(state->inode, cred,
2418 opendata->o_res.f_attr, sattr,
2419 state, label, olabel);
2420 if (status == 0) {
2421 nfs_setattr_update_inode(state->inode, sattr,
2422 opendata->o_res.f_attr);
2423 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2424 }
2425 }
2426 if (opendata->file_created)
2427 *opened |= FILE_CREATED;
2428
2429 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2430 *ctx_th = opendata->f_attr.mdsthreshold;
2431 opendata->f_attr.mdsthreshold = NULL;
2432 }
2433
2434 nfs4_label_free(olabel);
2435
2436 nfs4_opendata_put(opendata);
2437 nfs4_put_state_owner(sp);
2438 return 0;
2439 err_free_label:
2440 nfs4_label_free(olabel);
2441 err_opendata_put:
2442 nfs4_opendata_put(opendata);
2443 err_put_state_owner:
2444 nfs4_put_state_owner(sp);
2445 out_err:
2446 return status;
2447 }
2448
2449
2450 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2451 struct nfs_open_context *ctx,
2452 int flags,
2453 struct iattr *sattr,
2454 struct nfs4_label *label,
2455 int *opened)
2456 {
2457 struct nfs_server *server = NFS_SERVER(dir);
2458 struct nfs4_exception exception = { };
2459 struct nfs4_state *res;
2460 int status;
2461
2462 do {
2463 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2464 res = ctx->state;
2465 trace_nfs4_open_file(ctx, flags, status);
2466 if (status == 0)
2467 break;
2468 /* NOTE: BAD_SEQID means the server and client disagree about the
2469 * book-keeping w.r.t. state-changing operations
2470 * (OPEN/CLOSE/LOCK/LOCKU...)
2471 * It is actually a sign of a bug on the client or on the server.
2472 *
2473 * If we receive a BAD_SEQID error in the particular case of
2474 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2475 * have unhashed the old state_owner for us, and that we can
2476 * therefore safely retry using a new one. We should still warn
2477 * the user though...
2478 */
2479 if (status == -NFS4ERR_BAD_SEQID) {
2480 pr_warn_ratelimited("NFS: v4 server %s "
2481 				"returned a bad sequence-id error!\n",
2482 NFS_SERVER(dir)->nfs_client->cl_hostname);
2483 exception.retry = 1;
2484 continue;
2485 }
2486 /*
2487 * BAD_STATEID on OPEN means that the server cancelled our
2488 * state before it received the OPEN_CONFIRM.
2489 * Recover by retrying the request as per the discussion
2490 * on Page 181 of RFC3530.
2491 */
2492 if (status == -NFS4ERR_BAD_STATEID) {
2493 exception.retry = 1;
2494 continue;
2495 }
2496 if (status == -EAGAIN) {
2497 /* We must have found a delegation */
2498 exception.retry = 1;
2499 continue;
2500 }
2501 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2502 continue;
2503 res = ERR_PTR(nfs4_handle_exception(server,
2504 status, &exception));
2505 } while (exception.retry);
2506 return res;
2507 }
2508
2509 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2510 struct nfs_fattr *fattr, struct iattr *sattr,
2511 struct nfs4_state *state, struct nfs4_label *ilabel,
2512 struct nfs4_label *olabel)
2513 {
2514 struct nfs_server *server = NFS_SERVER(inode);
2515 struct nfs_setattrargs arg = {
2516 .fh = NFS_FH(inode),
2517 .iap = sattr,
2518 .server = server,
2519 .bitmask = server->attr_bitmask,
2520 .label = ilabel,
2521 };
2522 struct nfs_setattrres res = {
2523 .fattr = fattr,
2524 .label = olabel,
2525 .server = server,
2526 };
2527 struct rpc_message msg = {
2528 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2529 .rpc_argp = &arg,
2530 .rpc_resp = &res,
2531 .rpc_cred = cred,
2532 };
2533 unsigned long timestamp = jiffies;
2534 fmode_t fmode;
2535 bool truncate;
2536 int status;
2537
2538 arg.bitmask = nfs4_bitmask(server, ilabel);
2539 if (ilabel)
2540 arg.bitmask = nfs4_bitmask(server, olabel);
2541
2542 nfs_fattr_init(fattr);
2543
2544 /* Servers should only apply open mode checks for file size changes */
2545 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2546 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2547
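/* Stateid selection: prefer a delegation stateid, then a write open/lock
 * stateid when truncating, otherwise the anonymous (zero) stateid. */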
2548 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2549 /* Use that stateid */
2550 } else if (truncate && state != NULL) {
2551 struct nfs_lockowner lockowner = {
2552 .l_owner = current->files,
2553 .l_pid = current->tgid,
2554 };
2555 if (!nfs4_valid_open_stateid(state))
2556 return -EBADF;
2557 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2558 &lockowner) == -EIO)
2559 return -EBADF;
2560 } else
2561 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2562
2563 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2564 if (status == 0 && state != NULL)
2565 renew_lease(server, timestamp);
2566 return status;
2567 }
2568
2569 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2570 struct nfs_fattr *fattr, struct iattr *sattr,
2571 struct nfs4_state *state, struct nfs4_label *ilabel,
2572 struct nfs4_label *olabel)
2573 {
2574 struct nfs_server *server = NFS_SERVER(inode);
2575 struct nfs4_exception exception = {
2576 .state = state,
2577 .inode = inode,
2578 };
2579 int err;
2580 do {
2581 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2582 trace_nfs4_setattr(inode, err);
2583 switch (err) {
2584 case -NFS4ERR_OPENMODE:
2585 if (!(sattr->ia_valid & ATTR_SIZE)) {
2586 pr_warn_once("NFSv4: server %s is incorrectly "
2587 "applying open mode checks to "
2588 "a SETATTR that is not "
2589 "changing file size.\n",
2590 server->nfs_client->cl_hostname);
2591 }
2592 if (state && !(state->state & FMODE_WRITE)) {
2593 err = -EBADF;
2594 if (sattr->ia_valid & ATTR_OPEN)
2595 err = -EACCES;
2596 goto out;
2597 }
2598 }
2599 err = nfs4_handle_exception(server, err, &exception);
2600 } while (exception.retry);
2601 out:
2602 return err;
2603 }
2604
2605 struct nfs4_closedata {
2606 struct inode *inode;
2607 struct nfs4_state *state;
2608 struct nfs_closeargs arg;
2609 struct nfs_closeres res;
2610 struct nfs_fattr fattr;
2611 unsigned long timestamp;
2612 bool roc;
2613 u32 roc_barrier;
2614 };
2615
2616 static void nfs4_free_closedata(void *data)
2617 {
2618 struct nfs4_closedata *calldata = data;
2619 struct nfs4_state_owner *sp = calldata->state->owner;
2620 struct super_block *sb = calldata->state->inode->i_sb;
2621
2622 if (calldata->roc)
2623 pnfs_roc_release(calldata->state->inode);
2624 nfs4_put_open_state(calldata->state);
2625 nfs_free_seqid(calldata->arg.seqid);
2626 nfs4_put_state_owner(sp);
2627 nfs_sb_deactive(sb);
2628 kfree(calldata);
2629 }
2630
2631 static void nfs4_close_done(struct rpc_task *task, void *data)
2632 {
2633 struct nfs4_closedata *calldata = data;
2634 struct nfs4_state *state = calldata->state;
2635 struct nfs_server *server = NFS_SERVER(calldata->inode);
2636 nfs4_stateid *res_stateid = NULL;
2637
2638 dprintk("%s: begin!\n", __func__);
2639 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2640 return;
2641 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2642 	/* We are done with the inode, and in the process of freeing
2643 	 * the state_owner. We keep this around to process errors.
2644 */
2645 switch (task->tk_status) {
2646 case 0:
2647 res_stateid = &calldata->res.stateid;
2648 if (calldata->arg.fmode == 0 && calldata->roc)
2649 pnfs_roc_set_barrier(state->inode,
2650 calldata->roc_barrier);
2651 renew_lease(server, calldata->timestamp);
2652 break;
2653 case -NFS4ERR_ADMIN_REVOKED:
2654 case -NFS4ERR_STALE_STATEID:
2655 case -NFS4ERR_OLD_STATEID:
2656 case -NFS4ERR_BAD_STATEID:
2657 case -NFS4ERR_EXPIRED:
2658 if (!nfs4_stateid_match(&calldata->arg.stateid,
2659 &state->open_stateid)) {
2660 rpc_restart_call_prepare(task);
2661 goto out_release;
2662 }
2663 if (calldata->arg.fmode == 0)
2664 break;
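/* Fall through for an OPEN_DOWNGRADE (fmode != 0): let the default
 * error handling deal with the bad stateid */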
2665 default:
2666 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2667 rpc_restart_call_prepare(task);
2668 goto out_release;
2669 }
2670 }
2671 nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
2672 out_release:
2673 nfs_release_seqid(calldata->arg.seqid);
2674 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2675 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2676 }
2677
2678 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2679 {
2680 struct nfs4_closedata *calldata = data;
2681 struct nfs4_state *state = calldata->state;
2682 struct inode *inode = calldata->inode;
2683 bool is_rdonly, is_wronly, is_rdwr;
2684 int call_close = 0;
2685
2686 dprintk("%s: begin!\n", __func__);
2687 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2688 goto out_wait;
2689
2690 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2691 spin_lock(&state->owner->so_lock);
2692 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2693 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2694 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2695 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2696 /* Calculate the change in open mode */
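/* call_close is set when the last reference to a given open mode is being
 * dropped while the corresponding state bit is still set on the server;
 * arg.fmode ends up as the set of modes to keep (0 means a full CLOSE,
 * anything else an OPEN_DOWNGRADE). */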
2697 calldata->arg.fmode = 0;
2698 if (state->n_rdwr == 0) {
2699 if (state->n_rdonly == 0)
2700 call_close |= is_rdonly;
2701 else if (is_rdonly)
2702 calldata->arg.fmode |= FMODE_READ;
2703 if (state->n_wronly == 0)
2704 call_close |= is_wronly;
2705 else if (is_wronly)
2706 calldata->arg.fmode |= FMODE_WRITE;
2707 } else if (is_rdwr)
2708 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2709
2710 if (calldata->arg.fmode == 0)
2711 call_close |= is_rdwr;
2712
2713 if (!nfs4_valid_open_stateid(state))
2714 call_close = 0;
2715 spin_unlock(&state->owner->so_lock);
2716
2717 if (!call_close) {
2718 /* Note: exit _without_ calling nfs4_close_done */
2719 goto out_no_action;
2720 }
2721
2722 if (calldata->arg.fmode == 0) {
2723 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2724 if (calldata->roc &&
2725 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
2726 nfs_release_seqid(calldata->arg.seqid);
2727 goto out_wait;
2728 }
2729 }
2730 calldata->arg.share_access =
2731 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2732 calldata->arg.fmode, 0);
2733
2734 nfs_fattr_init(calldata->res.fattr);
2735 calldata->timestamp = jiffies;
2736 if (nfs4_setup_sequence(NFS_SERVER(inode),
2737 &calldata->arg.seq_args,
2738 &calldata->res.seq_res,
2739 task) != 0)
2740 nfs_release_seqid(calldata->arg.seqid);
2741 dprintk("%s: done!\n", __func__);
2742 return;
2743 out_no_action:
2744 task->tk_action = NULL;
2745 out_wait:
2746 nfs4_sequence_done(task, &calldata->res.seq_res);
2747 }
2748
2749 static const struct rpc_call_ops nfs4_close_ops = {
2750 .rpc_call_prepare = nfs4_close_prepare,
2751 .rpc_call_done = nfs4_close_done,
2752 .rpc_release = nfs4_free_closedata,
2753 };
2754
2755 static bool nfs4_roc(struct inode *inode)
2756 {
2757 if (!nfs_have_layout(inode))
2758 return false;
2759 return pnfs_roc(inode);
2760 }
2761
2762 /*
2763 * It is possible for data to be read/written from a mem-mapped file
2764 * after the sys_close call (which hits the vfs layer as a flush).
2765  * This means that we can't safely send an NFSv4 CLOSE for a file until
2766  * the inode is cleared. This in turn means that we are not good
2767  * NFSv4 citizens - we do not tell the server to update the file's
2768  * share state even when we are done with one of the three share
2769  * stateids in the inode.
2770 *
2771 * NOTE: Caller must be holding the sp->so_owner semaphore!
2772 */
2773 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2774 {
2775 struct nfs_server *server = NFS_SERVER(state->inode);
2776 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2777 struct nfs4_closedata *calldata;
2778 struct nfs4_state_owner *sp = state->owner;
2779 struct rpc_task *task;
2780 struct rpc_message msg = {
2781 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2782 .rpc_cred = state->owner->so_cred,
2783 };
2784 struct rpc_task_setup task_setup_data = {
2785 .rpc_client = server->client,
2786 .rpc_message = &msg,
2787 .callback_ops = &nfs4_close_ops,
2788 .workqueue = nfsiod_workqueue,
2789 .flags = RPC_TASK_ASYNC,
2790 };
2791 int status = -ENOMEM;
2792
2793 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2794 &task_setup_data.rpc_client, &msg);
2795
2796 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2797 if (calldata == NULL)
2798 goto out;
2799 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2800 calldata->inode = state->inode;
2801 calldata->state = state;
2802 calldata->arg.fh = NFS_FH(state->inode);
2803 /* Serialization for the sequence id */
2804 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2805 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2806 if (IS_ERR(calldata->arg.seqid))
2807 goto out_free_calldata;
2808 calldata->arg.fmode = 0;
2809 calldata->arg.bitmask = server->cache_consistency_bitmask;
2810 calldata->res.fattr = &calldata->fattr;
2811 calldata->res.seqid = calldata->arg.seqid;
2812 calldata->res.server = server;
2813 calldata->roc = nfs4_roc(state->inode);
2814 nfs_sb_active(calldata->inode->i_sb);
2815
2816 msg.rpc_argp = &calldata->arg;
2817 msg.rpc_resp = &calldata->res;
2818 task_setup_data.callback_data = calldata;
2819 task = rpc_run_task(&task_setup_data);
2820 if (IS_ERR(task))
2821 return PTR_ERR(task);
2822 status = 0;
2823 if (wait)
2824 status = rpc_wait_for_completion_task(task);
2825 rpc_put_task(task);
2826 return status;
2827 out_free_calldata:
2828 kfree(calldata);
2829 out:
2830 nfs4_put_open_state(state);
2831 nfs4_put_state_owner(sp);
2832 return status;
2833 }
2834
2835 static struct inode *
2836 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2837 int open_flags, struct iattr *attr, int *opened)
2838 {
2839 struct nfs4_state *state;
2840 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2841
2842 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2843
2844 /* Protect against concurrent sillydeletes */
2845 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2846
2847 nfs4_label_release_security(label);
2848
2849 if (IS_ERR(state))
2850 return ERR_CAST(state);
2851 return state->inode;
2852 }
2853
2854 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2855 {
2856 if (ctx->state == NULL)
2857 return;
2858 if (is_sync)
2859 nfs4_close_sync(ctx->state, ctx->mode);
2860 else
2861 nfs4_close_state(ctx->state, ctx->mode);
2862 }
2863
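/* For a single-bit FATTR4_WORD* flag F, (2*F - 1UL) is a mask with F and
 * every lower-order bit set, i.e. all attributes up to and including the
 * named one. */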
2864 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
2865 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
2866 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
2867
2868 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2869 {
2870 struct nfs4_server_caps_arg args = {
2871 .fhandle = fhandle,
2872 };
2873 struct nfs4_server_caps_res res = {};
2874 struct rpc_message msg = {
2875 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2876 .rpc_argp = &args,
2877 .rpc_resp = &res,
2878 };
2879 int status;
2880
2881 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2882 if (status == 0) {
2883 /* Sanity check the server answers */
2884 switch (server->nfs_client->cl_minorversion) {
2885 case 0:
2886 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
2887 res.attr_bitmask[2] = 0;
2888 break;
2889 case 1:
2890 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
2891 break;
2892 case 2:
2893 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
2894 }
2895 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2896 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2897 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2898 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2899 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2900 NFS_CAP_CTIME|NFS_CAP_MTIME|
2901 NFS_CAP_SECURITY_LABEL);
2902 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
2903 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
2904 server->caps |= NFS_CAP_ACLS;
2905 if (res.has_links != 0)
2906 server->caps |= NFS_CAP_HARDLINKS;
2907 if (res.has_symlinks != 0)
2908 server->caps |= NFS_CAP_SYMLINKS;
2909 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2910 server->caps |= NFS_CAP_FILEID;
2911 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2912 server->caps |= NFS_CAP_MODE;
2913 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2914 server->caps |= NFS_CAP_NLINK;
2915 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2916 server->caps |= NFS_CAP_OWNER;
2917 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2918 server->caps |= NFS_CAP_OWNER_GROUP;
2919 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2920 server->caps |= NFS_CAP_ATIME;
2921 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2922 server->caps |= NFS_CAP_CTIME;
2923 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2924 server->caps |= NFS_CAP_MTIME;
2925 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
2926 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
2927 server->caps |= NFS_CAP_SECURITY_LABEL;
2928 #endif
2929 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
2930 sizeof(server->attr_bitmask));
2931 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
2932
2933 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2934 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2935 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2936 server->cache_consistency_bitmask[2] = 0;
2937 server->acl_bitmask = res.acl_bitmask;
2938 server->fh_expire_type = res.fh_expire_type;
2939 }
2940
2941 return status;
2942 }
2943
2944 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2945 {
2946 struct nfs4_exception exception = { };
2947 int err;
2948 do {
2949 err = nfs4_handle_exception(server,
2950 _nfs4_server_capabilities(server, fhandle),
2951 &exception);
2952 } while (exception.retry);
2953 return err;
2954 }
2955
2956 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2957 struct nfs_fsinfo *info)
2958 {
2959 u32 bitmask[3];
2960 struct nfs4_lookup_root_arg args = {
2961 .bitmask = bitmask,
2962 };
2963 struct nfs4_lookup_res res = {
2964 .server = server,
2965 .fattr = info->fattr,
2966 .fh = fhandle,
2967 };
2968 struct rpc_message msg = {
2969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2970 .rpc_argp = &args,
2971 .rpc_resp = &res,
2972 };
2973
2974 bitmask[0] = nfs4_fattr_bitmap[0];
2975 bitmask[1] = nfs4_fattr_bitmap[1];
2976 /*
2977 	 * The security label is processed in the upcoming getfattr, so don't request it here
2978 */
2979 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
2980
2981 nfs_fattr_init(info->fattr);
2982 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2983 }
2984
2985 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2986 struct nfs_fsinfo *info)
2987 {
2988 struct nfs4_exception exception = { };
2989 int err;
2990 do {
2991 err = _nfs4_lookup_root(server, fhandle, info);
2992 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
2993 switch (err) {
2994 case 0:
2995 case -NFS4ERR_WRONGSEC:
2996 goto out;
2997 default:
2998 err = nfs4_handle_exception(server, err, &exception);
2999 }
3000 } while (exception.retry);
3001 out:
3002 return err;
3003 }
3004
3005 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3006 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3007 {
3008 struct rpc_auth_create_args auth_args = {
3009 .pseudoflavor = flavor,
3010 };
3011 struct rpc_auth *auth;
3012 int ret;
3013
3014 auth = rpcauth_create(&auth_args, server->client);
3015 if (IS_ERR(auth)) {
3016 ret = -EACCES;
3017 goto out;
3018 }
3019 ret = nfs4_lookup_root(server, fhandle, info);
3020 out:
3021 return ret;
3022 }
3023
3024 /*
3025 * Retry pseudoroot lookup with various security flavors. We do this when:
3026 *
3027 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3028 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3029 *
3030 * Returns zero on success, or a negative NFS4ERR value, or a
3031 * negative errno value.
3032 */
3033 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3034 struct nfs_fsinfo *info)
3035 {
3036 /* Per 3530bis 15.33.5 */
3037 static const rpc_authflavor_t flav_array[] = {
3038 RPC_AUTH_GSS_KRB5P,
3039 RPC_AUTH_GSS_KRB5I,
3040 RPC_AUTH_GSS_KRB5,
3041 RPC_AUTH_UNIX, /* courtesy */
3042 RPC_AUTH_NULL,
3043 };
3044 int status = -EPERM;
3045 size_t i;
3046
3047 if (server->auth_info.flavor_len > 0) {
3048 /* try each flavor specified by user */
3049 for (i = 0; i < server->auth_info.flavor_len; i++) {
3050 status = nfs4_lookup_root_sec(server, fhandle, info,
3051 server->auth_info.flavors[i]);
3052 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3053 continue;
3054 break;
3055 }
3056 } else {
3057 /* no flavors specified by user, try default list */
3058 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3059 status = nfs4_lookup_root_sec(server, fhandle, info,
3060 flav_array[i]);
3061 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3062 continue;
3063 break;
3064 }
3065 }
3066
3067 /*
3068  * -EACCES could mean that the user doesn't have correct permissions
3069 * to access the mount. It could also mean that we tried to mount
3070 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3071 * existing mount programs don't handle -EACCES very well so it should
3072 * be mapped to -EPERM instead.
3073 */
3074 if (status == -EACCES)
3075 status = -EPERM;
3076 return status;
3077 }
3078
3079 static int nfs4_do_find_root_sec(struct nfs_server *server,
3080 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3081 {
3082 int mv = server->nfs_client->cl_minorversion;
3083 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3084 }
3085
3086 /**
3087 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3088 * @server: initialized nfs_server handle
3089 * @fhandle: we fill in the pseudo-fs root file handle
3090 * @info: we fill in an FSINFO struct
3091 * @auth_probe: probe the auth flavours
3092 *
3093 * Returns zero on success, or a negative errno.
3094 */
3095 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3096 struct nfs_fsinfo *info,
3097 bool auth_probe)
3098 {
3099 int status;
3100
3101 switch (auth_probe) {
3102 case false:
3103 status = nfs4_lookup_root(server, fhandle, info);
3104 if (status != -NFS4ERR_WRONGSEC)
3105 break;
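/* Fall through: NFS4ERR_WRONGSEC means we must probe for a usable security flavor */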
3106 default:
3107 status = nfs4_do_find_root_sec(server, fhandle, info);
3108 }
3109
3110 if (status == 0)
3111 status = nfs4_server_capabilities(server, fhandle);
3112 if (status == 0)
3113 status = nfs4_do_fsinfo(server, fhandle, info);
3114
3115 return nfs4_map_errors(status);
3116 }
3117
3118 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3119 struct nfs_fsinfo *info)
3120 {
3121 int error;
3122 struct nfs_fattr *fattr = info->fattr;
3123 struct nfs4_label *label = NULL;
3124
3125 error = nfs4_server_capabilities(server, mntfh);
3126 if (error < 0) {
3127 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3128 return error;
3129 }
3130
3131 label = nfs4_label_alloc(server, GFP_KERNEL);
3132 if (IS_ERR(label))
3133 return PTR_ERR(label);
3134
3135 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3136 if (error < 0) {
3137 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3138 goto err_free_label;
3139 }
3140
3141 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3142 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3143 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3144
3145 err_free_label:
3146 nfs4_label_free(label);
3147
3148 return error;
3149 }
3150
3151 /*
3152 * Get locations and (maybe) other attributes of a referral.
3153 * Note that we'll actually follow the referral later when
3154 * we detect fsid mismatch in inode revalidation
3155 */
3156 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3157 const struct qstr *name, struct nfs_fattr *fattr,
3158 struct nfs_fh *fhandle)
3159 {
3160 int status = -ENOMEM;
3161 struct page *page = NULL;
3162 struct nfs4_fs_locations *locations = NULL;
3163
3164 page = alloc_page(GFP_KERNEL);
3165 if (page == NULL)
3166 goto out;
3167 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3168 if (locations == NULL)
3169 goto out;
3170
3171 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3172 if (status != 0)
3173 goto out;
3174
3175 /*
3176 * If the fsid didn't change, this is a migration event, not a
3177 * referral. Cause us to drop into the exception handler, which
3178 * will kick off migration recovery.
3179 */
3180 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3181 dprintk("%s: server did not return a different fsid for"
3182 " a referral at %s\n", __func__, name->name);
3183 status = -NFS4ERR_MOVED;
3184 goto out;
3185 }
3186 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3187 nfs_fixup_referral_attributes(&locations->fattr);
3188
3189 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3190 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3191 memset(fhandle, 0, sizeof(struct nfs_fh));
3192 out:
3193 if (page)
3194 __free_page(page);
3195 kfree(locations);
3196 return status;
3197 }
3198
3199 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3200 struct nfs_fattr *fattr, struct nfs4_label *label)
3201 {
3202 struct nfs4_getattr_arg args = {
3203 .fh = fhandle,
3204 .bitmask = server->attr_bitmask,
3205 };
3206 struct nfs4_getattr_res res = {
3207 .fattr = fattr,
3208 .label = label,
3209 .server = server,
3210 };
3211 struct rpc_message msg = {
3212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3213 .rpc_argp = &args,
3214 .rpc_resp = &res,
3215 };
3216
3217 args.bitmask = nfs4_bitmask(server, label);
3218
3219 nfs_fattr_init(fattr);
3220 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3221 }
3222
3223 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3224 struct nfs_fattr *fattr, struct nfs4_label *label)
3225 {
3226 struct nfs4_exception exception = { };
3227 int err;
3228 do {
3229 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3230 trace_nfs4_getattr(server, fhandle, fattr, err);
3231 err = nfs4_handle_exception(server, err,
3232 &exception);
3233 } while (exception.retry);
3234 return err;
3235 }
3236
3237 /*
3238  * The file is not closed if it is opened due to a request to change
3239 * the size of the file. The open call will not be needed once the
3240 * VFS layer lookup-intents are implemented.
3241 *
3242 * Close is called when the inode is destroyed.
3243 * If we haven't opened the file for O_WRONLY, we
3244  * need to do so in the size_change case to obtain a stateid.
3245 *
3246 * Got race?
3247 * Because OPEN is always done by name in nfsv4, it is
3248 * possible that we opened a different file by the same
3249 * name. We can recognize this race condition, but we
3250 * can't do anything about it besides returning an error.
3251 *
3252 * This will be fixed with VFS changes (lookup-intent).
3253 */
3254 static int
3255 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3256 struct iattr *sattr)
3257 {
3258 struct inode *inode = dentry->d_inode;
3259 struct rpc_cred *cred = NULL;
3260 struct nfs4_state *state = NULL;
3261 struct nfs4_label *label = NULL;
3262 int status;
3263
3264 if (pnfs_ld_layoutret_on_setattr(inode) &&
3265 sattr->ia_valid & ATTR_SIZE &&
3266 sattr->ia_size < i_size_read(inode))
3267 pnfs_commit_and_return_layout(inode);
3268
3269 nfs_fattr_init(fattr);
3270
3271 /* Deal with open(O_TRUNC) */
3272 if (sattr->ia_valid & ATTR_OPEN)
3273 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3274
3275 /* Optimization: if the end result is no change, don't RPC */
3276 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3277 return 0;
3278
3279 	/* Search for an existing file opened for writing */
3280 if (sattr->ia_valid & ATTR_FILE) {
3281 struct nfs_open_context *ctx;
3282
3283 ctx = nfs_file_open_context(sattr->ia_file);
3284 if (ctx) {
3285 cred = ctx->cred;
3286 state = ctx->state;
3287 }
3288 }
3289
3290 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3291 if (IS_ERR(label))
3292 return PTR_ERR(label);
3293
3294 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3295 if (status == 0) {
3296 nfs_setattr_update_inode(inode, sattr, fattr);
3297 nfs_setsecurity(inode, fattr, label);
3298 }
3299 nfs4_label_free(label);
3300 return status;
3301 }
3302
3303 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3304 const struct qstr *name, struct nfs_fh *fhandle,
3305 struct nfs_fattr *fattr, struct nfs4_label *label)
3306 {
3307 struct nfs_server *server = NFS_SERVER(dir);
3308 int status;
3309 struct nfs4_lookup_arg args = {
3310 .bitmask = server->attr_bitmask,
3311 .dir_fh = NFS_FH(dir),
3312 .name = name,
3313 };
3314 struct nfs4_lookup_res res = {
3315 .server = server,
3316 .fattr = fattr,
3317 .label = label,
3318 .fh = fhandle,
3319 };
3320 struct rpc_message msg = {
3321 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3322 .rpc_argp = &args,
3323 .rpc_resp = &res,
3324 };
3325
3326 args.bitmask = nfs4_bitmask(server, label);
3327
3328 nfs_fattr_init(fattr);
3329
3330 dprintk("NFS call lookup %s\n", name->name);
3331 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3332 dprintk("NFS reply lookup: %d\n", status);
3333 return status;
3334 }
3335
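/* After a SECINFO-driven security renegotiation we have no usable
 * attributes for the crossed mountpoint, so fake up a directory that
 * looks like a mountpoint (used by nfs4_proc_lookup() below). */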
3336 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3337 {
3338 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3339 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3340 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3341 fattr->nlink = 2;
3342 }
3343
3344 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3345 struct qstr *name, struct nfs_fh *fhandle,
3346 struct nfs_fattr *fattr, struct nfs4_label *label)
3347 {
3348 struct nfs4_exception exception = { };
3349 struct rpc_clnt *client = *clnt;
3350 int err;
3351 do {
3352 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3353 trace_nfs4_lookup(dir, name, err);
3354 switch (err) {
3355 case -NFS4ERR_BADNAME:
3356 err = -ENOENT;
3357 goto out;
3358 case -NFS4ERR_MOVED:
3359 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3360 goto out;
3361 case -NFS4ERR_WRONGSEC:
3362 err = -EPERM;
3363 if (client != *clnt)
3364 goto out;
3365 client = nfs4_negotiate_security(client, dir, name);
3366 if (IS_ERR(client))
3367 return PTR_ERR(client);
3368
3369 exception.retry = 1;
3370 break;
3371 default:
3372 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3373 }
3374 } while (exception.retry);
3375
3376 out:
3377 if (err == 0)
3378 *clnt = client;
3379 else if (client != *clnt)
3380 rpc_shutdown_client(client);
3381
3382 return err;
3383 }
3384
3385 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3386 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3387 struct nfs4_label *label)
3388 {
3389 int status;
3390 struct rpc_clnt *client = NFS_CLIENT(dir);
3391
3392 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3393 if (client != NFS_CLIENT(dir)) {
3394 rpc_shutdown_client(client);
3395 nfs_fixup_secinfo_attributes(fattr);
3396 }
3397 return status;
3398 }
3399
3400 struct rpc_clnt *
3401 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3402 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3403 {
3404 struct rpc_clnt *client = NFS_CLIENT(dir);
3405 int status;
3406
3407 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3408 if (status < 0)
3409 return ERR_PTR(status);
3410 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3411 }
3412
3413 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3414 {
3415 struct nfs_server *server = NFS_SERVER(inode);
3416 struct nfs4_accessargs args = {
3417 .fh = NFS_FH(inode),
3418 .bitmask = server->cache_consistency_bitmask,
3419 };
3420 struct nfs4_accessres res = {
3421 .server = server,
3422 };
3423 struct rpc_message msg = {
3424 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3425 .rpc_argp = &args,
3426 .rpc_resp = &res,
3427 .rpc_cred = entry->cred,
3428 };
3429 int mode = entry->mask;
3430 int status = 0;
3431
3432 /*
3433 * Determine which access bits we want to ask for...
3434 */
3435 if (mode & MAY_READ)
3436 args.access |= NFS4_ACCESS_READ;
3437 if (S_ISDIR(inode->i_mode)) {
3438 if (mode & MAY_WRITE)
3439 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3440 if (mode & MAY_EXEC)
3441 args.access |= NFS4_ACCESS_LOOKUP;
3442 } else {
3443 if (mode & MAY_WRITE)
3444 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3445 if (mode & MAY_EXEC)
3446 args.access |= NFS4_ACCESS_EXECUTE;
3447 }
3448
3449 res.fattr = nfs_alloc_fattr();
3450 if (res.fattr == NULL)
3451 return -ENOMEM;
3452
3453 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3454 if (!status) {
3455 nfs_access_set_mask(entry, res.access);
3456 nfs_refresh_inode(inode, res.fattr);
3457 }
3458 nfs_free_fattr(res.fattr);
3459 return status;
3460 }
3461
3462 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3463 {
3464 struct nfs4_exception exception = { };
3465 int err;
3466 do {
3467 err = _nfs4_proc_access(inode, entry);
3468 trace_nfs4_access(inode, err);
3469 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3470 &exception);
3471 } while (exception.retry);
3472 return err;
3473 }
3474
3475 /*
3476 * TODO: For the time being, we don't try to get any attributes
3477 * along with any of the zero-copy operations READ, READDIR,
3478 * READLINK, WRITE.
3479 *
3480 * In the case of the first three, we want to put the GETATTR
3481 * after the read-type operation -- this is because it is hard
3482  * to predict the length of a GETATTR response in v4, and thus
3483  * hard to align the READ data correctly. This means that the GETATTR
3484 * may end up partially falling into the page cache, and we should
3485 * shift it into the 'tail' of the xdr_buf before processing.
3486 * To do this efficiently, we need to know the total length
3487 * of data received, which doesn't seem to be available outside
3488 * of the RPC layer.
3489 *
3490 * In the case of WRITE, we also want to put the GETATTR after
3491 * the operation -- in this case because we want to make sure
3492 * we get the post-operation mtime and size.
3493 *
3494 * Both of these changes to the XDR layer would in fact be quite
3495 * minor, but I decided to leave them for a subsequent patch.
3496 */
3497 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3498 unsigned int pgbase, unsigned int pglen)
3499 {
3500 struct nfs4_readlink args = {
3501 .fh = NFS_FH(inode),
3502 .pgbase = pgbase,
3503 .pglen = pglen,
3504 .pages = &page,
3505 };
3506 struct nfs4_readlink_res res;
3507 struct rpc_message msg = {
3508 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3509 .rpc_argp = &args,
3510 .rpc_resp = &res,
3511 };
3512
3513 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3514 }
3515
3516 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3517 unsigned int pgbase, unsigned int pglen)
3518 {
3519 struct nfs4_exception exception = { };
3520 int err;
3521 do {
3522 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3523 trace_nfs4_readlink(inode, err);
3524 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3525 &exception);
3526 } while (exception.retry);
3527 return err;
3528 }
3529
3530 /*
3531 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3532 */
3533 static int
3534 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3535 int flags)
3536 {
3537 struct nfs4_label l, *ilabel = NULL;
3538 struct nfs_open_context *ctx;
3539 struct nfs4_state *state;
3540 int opened = 0;
3541 int status = 0;
3542
3543 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3544 if (IS_ERR(ctx))
3545 return PTR_ERR(ctx);
3546
3547 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3548
3549 sattr->ia_mode &= ~current_umask();
3550 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3551 if (IS_ERR(state)) {
3552 status = PTR_ERR(state);
3553 goto out;
3554 }
3555 out:
3556 nfs4_label_release_security(ilabel);
3557 put_nfs_open_context(ctx);
3558 return status;
3559 }
3560
3561 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3562 {
3563 struct nfs_server *server = NFS_SERVER(dir);
3564 struct nfs_removeargs args = {
3565 .fh = NFS_FH(dir),
3566 .name = *name,
3567 };
3568 struct nfs_removeres res = {
3569 .server = server,
3570 };
3571 struct rpc_message msg = {
3572 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3573 .rpc_argp = &args,
3574 .rpc_resp = &res,
3575 };
3576 int status;
3577
3578 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3579 if (status == 0)
3580 update_changeattr(dir, &res.cinfo);
3581 return status;
3582 }
3583
3584 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3585 {
3586 struct nfs4_exception exception = { };
3587 int err;
3588 do {
3589 err = _nfs4_proc_remove(dir, name);
3590 trace_nfs4_remove(dir, name, err);
3591 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3592 &exception);
3593 } while (exception.retry);
3594 return err;
3595 }
3596
3597 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3598 {
3599 struct nfs_server *server = NFS_SERVER(dir);
3600 struct nfs_removeargs *args = msg->rpc_argp;
3601 struct nfs_removeres *res = msg->rpc_resp;
3602
3603 res->server = server;
3604 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3605 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3606
3607 nfs_fattr_init(res->dir_attr);
3608 }
3609
3610 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3611 {
3612 nfs4_setup_sequence(NFS_SERVER(data->dir),
3613 &data->args.seq_args,
3614 &data->res.seq_res,
3615 task);
3616 }
3617
3618 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3619 {
3620 struct nfs_unlinkdata *data = task->tk_calldata;
3621 struct nfs_removeres *res = &data->res;
3622
3623 if (!nfs4_sequence_done(task, &res->seq_res))
3624 return 0;
3625 if (nfs4_async_handle_error(task, res->server, NULL,
3626 &data->timeout) == -EAGAIN)
3627 return 0;
3628 update_changeattr(dir, &res->cinfo);
3629 return 1;
3630 }
3631
3632 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3633 {
3634 struct nfs_server *server = NFS_SERVER(dir);
3635 struct nfs_renameargs *arg = msg->rpc_argp;
3636 struct nfs_renameres *res = msg->rpc_resp;
3637
3638 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3639 res->server = server;
3640 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3641 }
3642
3643 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3644 {
3645 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3646 &data->args.seq_args,
3647 &data->res.seq_res,
3648 task);
3649 }
3650
3651 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3652 struct inode *new_dir)
3653 {
3654 struct nfs_renamedata *data = task->tk_calldata;
3655 struct nfs_renameres *res = &data->res;
3656
3657 if (!nfs4_sequence_done(task, &res->seq_res))
3658 return 0;
3659 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3660 return 0;
3661
3662 update_changeattr(old_dir, &res->old_cinfo);
3663 update_changeattr(new_dir, &res->new_cinfo);
3664 return 1;
3665 }
3666
3667 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3668 {
3669 struct nfs_server *server = NFS_SERVER(inode);
3670 struct nfs4_link_arg arg = {
3671 .fh = NFS_FH(inode),
3672 .dir_fh = NFS_FH(dir),
3673 .name = name,
3674 .bitmask = server->attr_bitmask,
3675 };
3676 struct nfs4_link_res res = {
3677 .server = server,
3678 .label = NULL,
3679 };
3680 struct rpc_message msg = {
3681 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3682 .rpc_argp = &arg,
3683 .rpc_resp = &res,
3684 };
3685 int status = -ENOMEM;
3686
3687 res.fattr = nfs_alloc_fattr();
3688 if (res.fattr == NULL)
3689 goto out;
3690
3691 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3692 if (IS_ERR(res.label)) {
3693 status = PTR_ERR(res.label);
3694 goto out;
3695 }
3696 arg.bitmask = nfs4_bitmask(server, res.label);
3697
3698 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3699 if (!status) {
3700 update_changeattr(dir, &res.cinfo);
3701 status = nfs_post_op_update_inode(inode, res.fattr);
3702 if (!status)
3703 nfs_setsecurity(inode, res.fattr, res.label);
3704 }
3705
3706
3707 nfs4_label_free(res.label);
3708
3709 out:
3710 nfs_free_fattr(res.fattr);
3711 return status;
3712 }
3713
3714 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3715 {
3716 struct nfs4_exception exception = { };
3717 int err;
3718 do {
3719 err = nfs4_handle_exception(NFS_SERVER(inode),
3720 _nfs4_proc_link(inode, dir, name),
3721 &exception);
3722 } while (exception.retry);
3723 return err;
3724 }
3725
3726 struct nfs4_createdata {
3727 struct rpc_message msg;
3728 struct nfs4_create_arg arg;
3729 struct nfs4_create_res res;
3730 struct nfs_fh fh;
3731 struct nfs_fattr fattr;
3732 struct nfs4_label *label;
3733 };
3734
3735 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3736 struct qstr *name, struct iattr *sattr, u32 ftype)
3737 {
3738 struct nfs4_createdata *data;
3739
3740 data = kzalloc(sizeof(*data), GFP_KERNEL);
3741 if (data != NULL) {
3742 struct nfs_server *server = NFS_SERVER(dir);
3743
3744 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3745 if (IS_ERR(data->label))
3746 goto out_free;
3747
3748 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3749 data->msg.rpc_argp = &data->arg;
3750 data->msg.rpc_resp = &data->res;
3751 data->arg.dir_fh = NFS_FH(dir);
3752 data->arg.server = server;
3753 data->arg.name = name;
3754 data->arg.attrs = sattr;
3755 data->arg.ftype = ftype;
3756 data->arg.bitmask = nfs4_bitmask(server, data->label);
3757 data->res.server = server;
3758 data->res.fh = &data->fh;
3759 data->res.fattr = &data->fattr;
3760 data->res.label = data->label;
3761 nfs_fattr_init(data->res.fattr);
3762 }
3763 return data;
3764 out_free:
3765 kfree(data);
3766 return NULL;
3767 }
3768
3769 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3770 {
3771 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3772 &data->arg.seq_args, &data->res.seq_res, 1);
3773 if (status == 0) {
3774 update_changeattr(dir, &data->res.dir_cinfo);
3775 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3776 }
3777 return status;
3778 }
3779
3780 static void nfs4_free_createdata(struct nfs4_createdata *data)
3781 {
3782 nfs4_label_free(data->label);
3783 kfree(data);
3784 }
3785
3786 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3787 struct page *page, unsigned int len, struct iattr *sattr,
3788 struct nfs4_label *label)
3789 {
3790 struct nfs4_createdata *data;
3791 int status = -ENAMETOOLONG;
3792
3793 if (len > NFS4_MAXPATHLEN)
3794 goto out;
3795
3796 status = -ENOMEM;
3797 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3798 if (data == NULL)
3799 goto out;
3800
3801 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3802 data->arg.u.symlink.pages = &page;
3803 data->arg.u.symlink.len = len;
3804 data->arg.label = label;
3805
3806 status = nfs4_do_create(dir, dentry, data);
3807
3808 nfs4_free_createdata(data);
3809 out:
3810 return status;
3811 }
3812
3813 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3814 struct page *page, unsigned int len, struct iattr *sattr)
3815 {
3816 struct nfs4_exception exception = { };
3817 struct nfs4_label l, *label = NULL;
3818 int err;
3819
3820 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3821
3822 do {
3823 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3824 trace_nfs4_symlink(dir, &dentry->d_name, err);
3825 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3826 &exception);
3827 } while (exception.retry);
3828
3829 nfs4_label_release_security(label);
3830 return err;
3831 }
3832
3833 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3834 struct iattr *sattr, struct nfs4_label *label)
3835 {
3836 struct nfs4_createdata *data;
3837 int status = -ENOMEM;
3838
3839 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3840 if (data == NULL)
3841 goto out;
3842
3843 data->arg.label = label;
3844 status = nfs4_do_create(dir, dentry, data);
3845
3846 nfs4_free_createdata(data);
3847 out:
3848 return status;
3849 }
3850
3851 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3852 struct iattr *sattr)
3853 {
3854 struct nfs4_exception exception = { };
3855 struct nfs4_label l, *label = NULL;
3856 int err;
3857
3858 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3859
3860 sattr->ia_mode &= ~current_umask();
3861 do {
3862 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
3863 trace_nfs4_mkdir(dir, &dentry->d_name, err);
3864 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3865 &exception);
3866 } while (exception.retry);
3867 nfs4_label_release_security(label);
3868
3869 return err;
3870 }
3871
3872 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3873 u64 cookie, struct page **pages, unsigned int count, int plus)
3874 {
3875 struct inode *dir = dentry->d_inode;
3876 struct nfs4_readdir_arg args = {
3877 .fh = NFS_FH(dir),
3878 .pages = pages,
3879 .pgbase = 0,
3880 .count = count,
3881 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3882 .plus = plus,
3883 };
3884 struct nfs4_readdir_res res;
3885 struct rpc_message msg = {
3886 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3887 .rpc_argp = &args,
3888 .rpc_resp = &res,
3889 .rpc_cred = cred,
3890 };
3891 int status;
3892
3893 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
3894 dentry,
3895 (unsigned long long)cookie);
3896 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3897 res.pgbase = args.pgbase;
3898 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3899 if (status >= 0) {
3900 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3901 status += args.pgbase;
3902 }
3903
3904 nfs_invalidate_atime(dir);
3905
3906 dprintk("%s: returns %d\n", __func__, status);
3907 return status;
3908 }
3909
3910 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3911 u64 cookie, struct page **pages, unsigned int count, int plus)
3912 {
3913 struct nfs4_exception exception = { };
3914 int err;
3915 do {
3916 err = _nfs4_proc_readdir(dentry, cred, cookie,
3917 pages, count, plus);
3918 trace_nfs4_readdir(dentry->d_inode, err);
3919 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), err,
3920 &exception);
3921 } while (exception.retry);
3922 return err;
3923 }
3924
3925 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3926 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
3927 {
3928 struct nfs4_createdata *data;
3929 int mode = sattr->ia_mode;
3930 int status = -ENOMEM;
3931
3932 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3933 if (data == NULL)
3934 goto out;
3935
3936 if (S_ISFIFO(mode))
3937 data->arg.ftype = NF4FIFO;
3938 else if (S_ISBLK(mode)) {
3939 data->arg.ftype = NF4BLK;
3940 data->arg.u.device.specdata1 = MAJOR(rdev);
3941 data->arg.u.device.specdata2 = MINOR(rdev);
3942 }
3943 else if (S_ISCHR(mode)) {
3944 data->arg.ftype = NF4CHR;
3945 data->arg.u.device.specdata1 = MAJOR(rdev);
3946 data->arg.u.device.specdata2 = MINOR(rdev);
3947 } else if (!S_ISSOCK(mode)) {
3948 status = -EINVAL;
3949 goto out_free;
3950 }
3951
3952 data->arg.label = label;
3953 status = nfs4_do_create(dir, dentry, data);
3954 out_free:
3955 nfs4_free_createdata(data);
3956 out:
3957 return status;
3958 }
3959
3960 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3961 struct iattr *sattr, dev_t rdev)
3962 {
3963 struct nfs4_exception exception = { };
3964 struct nfs4_label l, *label = NULL;
3965 int err;
3966
3967 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3968
3969 sattr->ia_mode &= ~current_umask();
3970 do {
3971 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
3972 trace_nfs4_mknod(dir, &dentry->d_name, err);
3973 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3974 &exception);
3975 } while (exception.retry);
3976
3977 nfs4_label_release_security(label);
3978
3979 return err;
3980 }
3981
3982 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3983 struct nfs_fsstat *fsstat)
3984 {
3985 struct nfs4_statfs_arg args = {
3986 .fh = fhandle,
3987 .bitmask = server->attr_bitmask,
3988 };
3989 struct nfs4_statfs_res res = {
3990 .fsstat = fsstat,
3991 };
3992 struct rpc_message msg = {
3993 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3994 .rpc_argp = &args,
3995 .rpc_resp = &res,
3996 };
3997
3998 nfs_fattr_init(fsstat->fattr);
3999 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4000 }
4001
4002 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4003 {
4004 struct nfs4_exception exception = { };
4005 int err;
4006 do {
4007 err = nfs4_handle_exception(server,
4008 _nfs4_proc_statfs(server, fhandle, fsstat),
4009 &exception);
4010 } while (exception.retry);
4011 return err;
4012 }
4013
4014 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4015 struct nfs_fsinfo *fsinfo)
4016 {
4017 struct nfs4_fsinfo_arg args = {
4018 .fh = fhandle,
4019 .bitmask = server->attr_bitmask,
4020 };
4021 struct nfs4_fsinfo_res res = {
4022 .fsinfo = fsinfo,
4023 };
4024 struct rpc_message msg = {
4025 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4026 .rpc_argp = &args,
4027 .rpc_resp = &res,
4028 };
4029
4030 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4031 }
4032
4033 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4034 {
4035 struct nfs4_exception exception = { };
4036 unsigned long now = jiffies;
4037 int err;
4038
4039 do {
4040 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4041 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4042 if (err == 0) {
4043 struct nfs_client *clp = server->nfs_client;
4044
4045 spin_lock(&clp->cl_lock);
4046 clp->cl_lease_time = fsinfo->lease_time * HZ;
4047 clp->cl_last_renewal = now;
4048 spin_unlock(&clp->cl_lock);
4049 break;
4050 }
4051 err = nfs4_handle_exception(server, err, &exception);
4052 } while (exception.retry);
4053 return err;
4054 }
4055
4056 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4057 {
4058 int error;
4059
4060 nfs_fattr_init(fsinfo->fattr);
4061 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4062 if (error == 0) {
4063 /* block layout checks this! */
4064 server->pnfs_blksize = fsinfo->blksize;
4065 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4066 }
4067
4068 return error;
4069 }
4070
4071 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4072 struct nfs_pathconf *pathconf)
4073 {
4074 struct nfs4_pathconf_arg args = {
4075 .fh = fhandle,
4076 .bitmask = server->attr_bitmask,
4077 };
4078 struct nfs4_pathconf_res res = {
4079 .pathconf = pathconf,
4080 };
4081 struct rpc_message msg = {
4082 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4083 .rpc_argp = &args,
4084 .rpc_resp = &res,
4085 };
4086
4087 /* None of the pathconf attributes are mandatory to implement */
4088 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4089 memset(pathconf, 0, sizeof(*pathconf));
4090 return 0;
4091 }
4092
4093 nfs_fattr_init(pathconf->fattr);
4094 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4095 }
4096
4097 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4098 struct nfs_pathconf *pathconf)
4099 {
4100 struct nfs4_exception exception = { };
4101 int err;
4102
4103 do {
4104 err = nfs4_handle_exception(server,
4105 _nfs4_proc_pathconf(server, fhandle, pathconf),
4106 &exception);
4107 } while (exception.retry);
4108 return err;
4109 }
4110
4111 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4112 const struct nfs_open_context *ctx,
4113 const struct nfs_lock_context *l_ctx,
4114 fmode_t fmode)
4115 {
4116 const struct nfs_lockowner *lockowner = NULL;
4117
4118 if (l_ctx != NULL)
4119 lockowner = &l_ctx->lockowner;
4120 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4121 }
4122 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4123
4124 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4125 const struct nfs_open_context *ctx,
4126 const struct nfs_lock_context *l_ctx,
4127 fmode_t fmode)
4128 {
4129 nfs4_stateid current_stateid;
4130
4131 /* If the current stateid represents a lost lock, then exit */
4132 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4133 return true;
4134 return nfs4_stateid_match(stateid, &current_stateid);
4135 }
4136
4137 static bool nfs4_error_stateid_expired(int err)
4138 {
4139 switch (err) {
4140 case -NFS4ERR_DELEG_REVOKED:
4141 case -NFS4ERR_ADMIN_REVOKED:
4142 case -NFS4ERR_BAD_STATEID:
4143 case -NFS4ERR_STALE_STATEID:
4144 case -NFS4ERR_OLD_STATEID:
4145 case -NFS4ERR_OPENMODE:
4146 case -NFS4ERR_EXPIRED:
4147 return true;
4148 }
4149 return false;
4150 }
4151
4152 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4153 {
4154 nfs_invalidate_atime(hdr->inode);
4155 }
4156
4157 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4158 {
4159 struct nfs_server *server = NFS_SERVER(hdr->inode);
4160
4161 trace_nfs4_read(hdr, task->tk_status);
4162 if (nfs4_async_handle_error(task, server,
4163 hdr->args.context->state,
4164 NULL) == -EAGAIN) {
4165 rpc_restart_call_prepare(task);
4166 return -EAGAIN;
4167 }
4168
4169 __nfs4_read_done_cb(hdr);
4170 if (task->tk_status > 0)
4171 renew_lease(server, hdr->timestamp);
4172 return 0;
4173 }
4174
4175 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4176 struct nfs_pgio_args *args)
4177 {
4178
4179 if (!nfs4_error_stateid_expired(task->tk_status) ||
4180 nfs4_stateid_is_current(&args->stateid,
4181 args->context,
4182 args->lock_context,
4183 FMODE_READ))
4184 return false;
4185 rpc_restart_call_prepare(task);
4186 return true;
4187 }
4188
4189 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4190 {
4191
4192 dprintk("--> %s\n", __func__);
4193
4194 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4195 return -EAGAIN;
4196 if (nfs4_read_stateid_changed(task, &hdr->args))
4197 return -EAGAIN;
4198 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4199 nfs4_read_done_cb(task, hdr);
4200 }
4201
4202 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4203 struct rpc_message *msg)
4204 {
4205 hdr->timestamp = jiffies;
4206 hdr->pgio_done_cb = nfs4_read_done_cb;
4207 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4208 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4209 }
4210
4211 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4212 struct nfs_pgio_header *hdr)
4213 {
4214 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4215 &hdr->args.seq_args,
4216 &hdr->res.seq_res,
4217 task))
4218 return 0;
4219 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4220 hdr->args.lock_context,
4221 hdr->rw_ops->rw_mode) == -EIO)
4222 return -EIO;
4223 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4224 return -EIO;
4225 return 0;
4226 }
4227
4228 static int nfs4_write_done_cb(struct rpc_task *task,
4229 struct nfs_pgio_header *hdr)
4230 {
4231 struct inode *inode = hdr->inode;
4232
4233 trace_nfs4_write(hdr, task->tk_status);
4234 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4235 hdr->args.context->state,
4236 NULL) == -EAGAIN) {
4237 rpc_restart_call_prepare(task);
4238 return -EAGAIN;
4239 }
4240 if (task->tk_status >= 0) {
4241 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4242 nfs_writeback_update_inode(hdr);
4243 }
4244 return 0;
4245 }
4246
4247 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4248 struct nfs_pgio_args *args)
4249 {
4250
4251 if (!nfs4_error_stateid_expired(task->tk_status) ||
4252 nfs4_stateid_is_current(&args->stateid,
4253 args->context,
4254 args->lock_context,
4255 FMODE_WRITE))
4256 return false;
4257 rpc_restart_call_prepare(task);
4258 return true;
4259 }
4260
4261 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4262 {
4263 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4264 return -EAGAIN;
4265 if (nfs4_write_stateid_changed(task, &hdr->args))
4266 return -EAGAIN;
4267 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4268 nfs4_write_done_cb(task, hdr);
4269 }
4270
4271 static
4272 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4273 {
4274 /* Don't request attributes for pNFS or O_DIRECT writes */
4275 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4276 return false;
4277 /* Otherwise, request attributes if and only if we don't hold
4278 * a delegation
4279 */
4280 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4281 }
4282
4283 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4284 struct rpc_message *msg)
4285 {
4286 struct nfs_server *server = NFS_SERVER(hdr->inode);
4287
4288 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4289 hdr->args.bitmask = NULL;
4290 hdr->res.fattr = NULL;
4291 } else
4292 hdr->args.bitmask = server->cache_consistency_bitmask;
4293
4294 if (!hdr->pgio_done_cb)
4295 hdr->pgio_done_cb = nfs4_write_done_cb;
4296 hdr->res.server = server;
4297 hdr->timestamp = jiffies;
4298
4299 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4300 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4301 }
4302
4303 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4304 {
4305 nfs4_setup_sequence(NFS_SERVER(data->inode),
4306 &data->args.seq_args,
4307 &data->res.seq_res,
4308 task);
4309 }
4310
4311 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4312 {
4313 struct inode *inode = data->inode;
4314
4315 trace_nfs4_commit(data, task->tk_status);
4316 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4317 NULL, NULL) == -EAGAIN) {
4318 rpc_restart_call_prepare(task);
4319 return -EAGAIN;
4320 }
4321 return 0;
4322 }
4323
4324 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4325 {
4326 if (!nfs4_sequence_done(task, &data->res.seq_res))
4327 return -EAGAIN;
4328 return data->commit_done_cb(task, data);
4329 }
4330
4331 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4332 {
4333 struct nfs_server *server = NFS_SERVER(data->inode);
4334
4335 if (data->commit_done_cb == NULL)
4336 data->commit_done_cb = nfs4_commit_done_cb;
4337 data->res.server = server;
4338 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4339 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4340 }
4341
4342 struct nfs4_renewdata {
4343 struct nfs_client *client;
4344 unsigned long timestamp;
4345 };
4346
4347 /*
4348 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4349 * standalone procedure for queueing an asynchronous RENEW.
4350 */
4351 static void nfs4_renew_release(void *calldata)
4352 {
4353 struct nfs4_renewdata *data = calldata;
4354 struct nfs_client *clp = data->client;
4355
4356 if (atomic_read(&clp->cl_count) > 1)
4357 nfs4_schedule_state_renewal(clp);
4358 nfs_put_client(clp);
4359 kfree(data);
4360 }
4361
4362 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4363 {
4364 struct nfs4_renewdata *data = calldata;
4365 struct nfs_client *clp = data->client;
4366 unsigned long timestamp = data->timestamp;
4367
4368 trace_nfs4_renew_async(clp, task->tk_status);
4369 switch (task->tk_status) {
4370 case 0:
4371 break;
4372 case -NFS4ERR_LEASE_MOVED:
4373 nfs4_schedule_lease_moved_recovery(clp);
4374 break;
4375 default:
4376 /* Unless we're shutting down, schedule state recovery! */
4377 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4378 return;
4379 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4380 nfs4_schedule_lease_recovery(clp);
4381 return;
4382 }
4383 nfs4_schedule_path_down_recovery(clp);
4384 }
4385 do_renew_lease(clp, timestamp);
4386 }
4387
4388 static const struct rpc_call_ops nfs4_renew_ops = {
4389 .rpc_call_done = nfs4_renew_done,
4390 .rpc_release = nfs4_renew_release,
4391 };
4392
4393 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4394 {
4395 struct rpc_message msg = {
4396 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4397 .rpc_argp = clp,
4398 .rpc_cred = cred,
4399 };
4400 struct nfs4_renewdata *data;
4401
4402 if (renew_flags == 0)
4403 return 0;
4404 if (!atomic_inc_not_zero(&clp->cl_count))
4405 return -EIO;
4406 data = kmalloc(sizeof(*data), GFP_NOFS);
4407 if (data == NULL)
4408 return -ENOMEM;
4409 data->client = clp;
4410 data->timestamp = jiffies;
4411 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4412 &nfs4_renew_ops, data);
4413 }
4414
4415 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4416 {
4417 struct rpc_message msg = {
4418 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4419 .rpc_argp = clp,
4420 .rpc_cred = cred,
4421 };
4422 unsigned long now = jiffies;
4423 int status;
4424
4425 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4426 if (status < 0)
4427 return status;
4428 do_renew_lease(clp, now);
4429 return 0;
4430 }
4431
4432 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4433 {
4434 return server->caps & NFS_CAP_ACLS;
4435 }
4436
4437 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4438 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4439 * the stack.
4440 */
4441 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
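/*
 * Illustrative arithmetic only (not in the original source): with the
 * common values XATTR_SIZE_MAX = 65536 and PAGE_SIZE = 4096,
 * NFS4ACL_MAXPAGES works out to 16, so each on-stack
 * "struct page *pages[NFS4ACL_MAXPAGES]" array below costs 16 pointers,
 * i.e. 128 bytes on a 64-bit build -- small enough to justify the
 * assumption in the comment above.
 */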
4442
4443 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4444 struct page **pages, unsigned int *pgbase)
4445 {
4446 struct page *newpage, **spages;
4447 int rc = 0;
4448 size_t len;
4449 spages = pages;
4450
4451 do {
4452 len = min_t(size_t, PAGE_SIZE, buflen);
4453 newpage = alloc_page(GFP_KERNEL);
4454
4455 if (newpage == NULL)
4456 goto unwind;
4457 memcpy(page_address(newpage), buf, len);
4458 buf += len;
4459 buflen -= len;
4460 *pages++ = newpage;
4461 rc++;
4462 } while (buflen != 0);
4463
4464 return rc;
4465
4466 unwind:
4467 for (; rc > 0; rc--)
4468 __free_page(spages[rc-1]);
4469 return -ENOMEM;
4470 }
4471
4472 struct nfs4_cached_acl {
4473 int cached;
4474 size_t len;
4475 char data[0];
4476 };
4477
4478 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4479 {
4480 struct nfs_inode *nfsi = NFS_I(inode);
4481
4482 spin_lock(&inode->i_lock);
4483 kfree(nfsi->nfs4_acl);
4484 nfsi->nfs4_acl = acl;
4485 spin_unlock(&inode->i_lock);
4486 }
4487
4488 static void nfs4_zap_acl_attr(struct inode *inode)
4489 {
4490 nfs4_set_cached_acl(inode, NULL);
4491 }
4492
4493 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4494 {
4495 struct nfs_inode *nfsi = NFS_I(inode);
4496 struct nfs4_cached_acl *acl;
4497 int ret = -ENOENT;
4498
4499 spin_lock(&inode->i_lock);
4500 acl = nfsi->nfs4_acl;
4501 if (acl == NULL)
4502 goto out;
4503 if (buf == NULL) /* user is just asking for length */
4504 goto out_len;
4505 if (acl->cached == 0)
4506 goto out;
4507 ret = -ERANGE; /* see getxattr(2) man page */
4508 if (acl->len > buflen)
4509 goto out;
4510 memcpy(buf, acl->data, acl->len);
4511 out_len:
4512 ret = acl->len;
4513 out:
4514 spin_unlock(&inode->i_lock);
4515 return ret;
4516 }
4517
4518 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4519 {
4520 struct nfs4_cached_acl *acl;
4521 size_t buflen = sizeof(*acl) + acl_len;
4522
4523 if (buflen <= PAGE_SIZE) {
4524 acl = kmalloc(buflen, GFP_KERNEL);
4525 if (acl == NULL)
4526 goto out;
4527 acl->cached = 1;
4528 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4529 } else {
4530 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4531 if (acl == NULL)
4532 goto out;
4533 acl->cached = 0;
4534 }
4535 acl->len = acl_len;
4536 out:
4537 nfs4_set_cached_acl(inode, acl);
4538 }
4539
4540 /*
4541 * The getxattr API returns the required buffer length when called with a
4542 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4543 * the required buf. On a NULL buf, we send a page of data to the server
4544 * guessing that the ACL request can be serviced by a page. If so, we cache
4545 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
4546 * the cache. If not so, we throw away the page, and cache the required
4547 * length. The next getxattr call will then produce another round trip to
4548 * the server, this time with the input buf of the required size.
4549 */
4550 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4551 {
4552 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4553 struct nfs_getaclargs args = {
4554 .fh = NFS_FH(inode),
4555 .acl_pages = pages,
4556 .acl_len = buflen,
4557 };
4558 struct nfs_getaclres res = {
4559 .acl_len = buflen,
4560 };
4561 struct rpc_message msg = {
4562 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4563 .rpc_argp = &args,
4564 .rpc_resp = &res,
4565 };
4566 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4567 int ret = -ENOMEM, i;
4568
4569 /* As long as we're doing a round trip to the server anyway,
4570 * let's be prepared for a page of acl data. */
4571 if (npages == 0)
4572 npages = 1;
4573 if (npages > ARRAY_SIZE(pages))
4574 return -ERANGE;
4575
4576 for (i = 0; i < npages; i++) {
4577 pages[i] = alloc_page(GFP_KERNEL);
4578 if (!pages[i])
4579 goto out_free;
4580 }
4581
4582 /* for decoding across pages */
4583 res.acl_scratch = alloc_page(GFP_KERNEL);
4584 if (!res.acl_scratch)
4585 goto out_free;
4586
4587 args.acl_len = npages * PAGE_SIZE;
4588 args.acl_pgbase = 0;
4589
4590 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4591 __func__, buf, buflen, npages, args.acl_len);
4592 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4593 &msg, &args.seq_args, &res.seq_res, 0);
4594 if (ret)
4595 goto out_free;
4596
4597 /* Handle the case where the passed-in buffer is too short */
4598 if (res.acl_flags & NFS4_ACL_TRUNC) {
4599 /* Did the user only issue a request for the acl length? */
4600 if (buf == NULL)
4601 goto out_ok;
4602 ret = -ERANGE;
4603 goto out_free;
4604 }
4605 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4606 if (buf) {
4607 if (res.acl_len > buflen) {
4608 ret = -ERANGE;
4609 goto out_free;
4610 }
4611 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4612 }
4613 out_ok:
4614 ret = res.acl_len;
4615 out_free:
4616 for (i = 0; i < npages; i++)
4617 if (pages[i])
4618 __free_page(pages[i]);
4619 if (res.acl_scratch)
4620 __free_page(res.acl_scratch);
4621 return ret;
4622 }
4623
4624 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4625 {
4626 struct nfs4_exception exception = { };
4627 ssize_t ret;
4628 do {
4629 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4630 trace_nfs4_get_acl(inode, ret);
4631 if (ret >= 0)
4632 break;
4633 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4634 } while (exception.retry);
4635 return ret;
4636 }
4637
4638 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4639 {
4640 struct nfs_server *server = NFS_SERVER(inode);
4641 int ret;
4642
4643 if (!nfs4_server_supports_acls(server))
4644 return -EOPNOTSUPP;
4645 ret = nfs_revalidate_inode(server, inode);
4646 if (ret < 0)
4647 return ret;
4648 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4649 nfs_zap_acl_cache(inode);
4650 ret = nfs4_read_cached_acl(inode, buf, buflen);
4651 if (ret != -ENOENT)
4652 /* -ENOENT is returned if there is no ACL or if there is an ACL
4653 * but no cached acl data, just the acl length */
4654 return ret;
4655 return nfs4_get_acl_uncached(inode, buf, buflen);
4656 }
4657
4658 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4659 {
4660 struct nfs_server *server = NFS_SERVER(inode);
4661 struct page *pages[NFS4ACL_MAXPAGES];
4662 struct nfs_setaclargs arg = {
4663 .fh = NFS_FH(inode),
4664 .acl_pages = pages,
4665 .acl_len = buflen,
4666 };
4667 struct nfs_setaclres res;
4668 struct rpc_message msg = {
4669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4670 .rpc_argp = &arg,
4671 .rpc_resp = &res,
4672 };
4673 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4674 int ret, i;
4675
4676 if (!nfs4_server_supports_acls(server))
4677 return -EOPNOTSUPP;
4678 if (npages > ARRAY_SIZE(pages))
4679 return -ERANGE;
4680 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4681 if (i < 0)
4682 return i;
4683 nfs4_inode_return_delegation(inode);
4684 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4685
4686 /*
4687 * Free each page after tx, so the only ref left is
4688 * held by the network stack
4689 */
4690 for (; i > 0; i--)
4691 put_page(pages[i-1]);
4692
4693 /*
4694 * An ACL update can result in an inode attribute update,
4695 * so mark the attribute cache invalid.
4696 */
4697 spin_lock(&inode->i_lock);
4698 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4699 spin_unlock(&inode->i_lock);
4700 nfs_access_zap_cache(inode);
4701 nfs_zap_acl_cache(inode);
4702 return ret;
4703 }
4704
4705 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4706 {
4707 struct nfs4_exception exception = { };
4708 int err;
4709 do {
4710 err = __nfs4_proc_set_acl(inode, buf, buflen);
4711 trace_nfs4_set_acl(inode, err);
4712 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4713 &exception);
4714 } while (exception.retry);
4715 return err;
4716 }
4717
4718 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4719 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4720 size_t buflen)
4721 {
4722 struct nfs_server *server = NFS_SERVER(inode);
4723 struct nfs_fattr fattr;
4724 struct nfs4_label label = {0, 0, buflen, buf};
4725
4726 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4727 struct nfs4_getattr_arg arg = {
4728 .fh = NFS_FH(inode),
4729 .bitmask = bitmask,
4730 };
4731 struct nfs4_getattr_res res = {
4732 .fattr = &fattr,
4733 .label = &label,
4734 .server = server,
4735 };
4736 struct rpc_message msg = {
4737 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4738 .rpc_argp = &arg,
4739 .rpc_resp = &res,
4740 };
4741 int ret;
4742
4743 nfs_fattr_init(&fattr);
4744
4745 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4746 if (ret)
4747 return ret;
4748 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4749 return -ENOENT;
4750 if (buflen < label.len)
4751 return -ERANGE;
4752 return 0;
4753 }
4754
4755 static int nfs4_get_security_label(struct inode *inode, void *buf,
4756 size_t buflen)
4757 {
4758 struct nfs4_exception exception = { };
4759 int err;
4760
4761 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4762 return -EOPNOTSUPP;
4763
4764 do {
4765 err = _nfs4_get_security_label(inode, buf, buflen);
4766 trace_nfs4_get_security_label(inode, err);
4767 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4768 &exception);
4769 } while (exception.retry);
4770 return err;
4771 }
4772
4773 static int _nfs4_do_set_security_label(struct inode *inode,
4774 struct nfs4_label *ilabel,
4775 struct nfs_fattr *fattr,
4776 struct nfs4_label *olabel)
4777 {
4778
4779 struct iattr sattr = {0};
4780 struct nfs_server *server = NFS_SERVER(inode);
4781 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4782 struct nfs_setattrargs arg = {
4783 .fh = NFS_FH(inode),
4784 .iap = &sattr,
4785 .server = server,
4786 .bitmask = bitmask,
4787 .label = ilabel,
4788 };
4789 struct nfs_setattrres res = {
4790 .fattr = fattr,
4791 .label = olabel,
4792 .server = server,
4793 };
4794 struct rpc_message msg = {
4795 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4796 .rpc_argp = &arg,
4797 .rpc_resp = &res,
4798 };
4799 int status;
4800
4801 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4802
4803 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4804 if (status)
4805 dprintk("%s failed: %d\n", __func__, status);
4806
4807 return status;
4808 }
4809
4810 static int nfs4_do_set_security_label(struct inode *inode,
4811 struct nfs4_label *ilabel,
4812 struct nfs_fattr *fattr,
4813 struct nfs4_label *olabel)
4814 {
4815 struct nfs4_exception exception = { };
4816 int err;
4817
4818 do {
4819 err = _nfs4_do_set_security_label(inode, ilabel,
4820 fattr, olabel);
4821 trace_nfs4_set_security_label(inode, err);
4822 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4823 &exception);
4824 } while (exception.retry);
4825 return err;
4826 }
4827
4828 static int
4829 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4830 {
4831 struct nfs4_label ilabel, *olabel = NULL;
4832 struct nfs_fattr fattr;
4833 struct rpc_cred *cred;
4834 struct inode *inode = dentry->d_inode;
4835 int status;
4836
4837 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4838 return -EOPNOTSUPP;
4839
4840 nfs_fattr_init(&fattr);
4841
4842 ilabel.pi = 0;
4843 ilabel.lfs = 0;
4844 ilabel.label = (char *)buf;
4845 ilabel.len = buflen;
4846
4847 cred = rpc_lookup_cred();
4848 if (IS_ERR(cred))
4849 return PTR_ERR(cred);
4850
4851 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4852 if (IS_ERR(olabel)) {
4853 status = PTR_ERR(olabel);
4854 goto out;
4855 }
4856
4857 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
4858 if (status == 0)
4859 nfs_setsecurity(inode, &fattr, olabel);
4860
4861 nfs4_label_free(olabel);
4862 out:
4863 put_rpccred(cred);
4864 return status;
4865 }
4866 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4867
4868
4869 static int
4870 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4871 struct nfs4_state *state, long *timeout)
4872 {
4873 struct nfs_client *clp = server->nfs_client;
4874
4875 if (task->tk_status >= 0)
4876 return 0;
4877 switch (task->tk_status) {
4878 case -NFS4ERR_DELEG_REVOKED:
4879 case -NFS4ERR_ADMIN_REVOKED:
4880 case -NFS4ERR_BAD_STATEID:
4881 case -NFS4ERR_OPENMODE:
4882 if (state == NULL)
4883 break;
4884 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4885 goto recovery_failed;
4886 goto wait_on_recovery;
4887 case -NFS4ERR_EXPIRED:
4888 if (state != NULL) {
4889 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4890 goto recovery_failed;
4891 }
4892 case -NFS4ERR_STALE_STATEID:
4893 case -NFS4ERR_STALE_CLIENTID:
4894 nfs4_schedule_lease_recovery(clp);
4895 goto wait_on_recovery;
4896 case -NFS4ERR_MOVED:
4897 if (nfs4_schedule_migration_recovery(server) < 0)
4898 goto recovery_failed;
4899 goto wait_on_recovery;
4900 case -NFS4ERR_LEASE_MOVED:
4901 nfs4_schedule_lease_moved_recovery(clp);
4902 goto wait_on_recovery;
4903 #if defined(CONFIG_NFS_V4_1)
4904 case -NFS4ERR_BADSESSION:
4905 case -NFS4ERR_BADSLOT:
4906 case -NFS4ERR_BAD_HIGH_SLOT:
4907 case -NFS4ERR_DEADSESSION:
4908 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4909 case -NFS4ERR_SEQ_FALSE_RETRY:
4910 case -NFS4ERR_SEQ_MISORDERED:
4911 dprintk("%s ERROR %d, Reset session\n", __func__,
4912 task->tk_status);
4913 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4914 goto wait_on_recovery;
4915 #endif /* CONFIG_NFS_V4_1 */
4916 case -NFS4ERR_DELAY:
4917 nfs_inc_server_stats(server, NFSIOS_DELAY);
4918 rpc_delay(task, nfs4_update_delay(timeout));
4919 goto restart_call;
4920 case -NFS4ERR_GRACE:
4921 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4922 case -NFS4ERR_RETRY_UNCACHED_REP:
4923 case -NFS4ERR_OLD_STATEID:
4924 goto restart_call;
4925 }
4926 task->tk_status = nfs4_map_errors(task->tk_status);
4927 return 0;
4928 recovery_failed:
4929 task->tk_status = -EIO;
4930 return 0;
4931 wait_on_recovery:
4932 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4933 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4934 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4935 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
4936 goto recovery_failed;
4937 restart_call:
4938 task->tk_status = 0;
4939 return -EAGAIN;
4940 }
4941
4942 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4943 nfs4_verifier *bootverf)
4944 {
4945 __be32 verf[2];
4946
4947 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4948 /* An impossible timestamp guarantees this value
4949 * will never match a generated boot time. */
4950 verf[0] = 0;
4951 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
4952 } else {
4953 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4954 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
4955 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
4956 }
4957 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4958 }
4959
4960 static unsigned int
4961 nfs4_init_nonuniform_client_string(struct nfs_client *clp,
4962 char *buf, size_t len)
4963 {
4964 unsigned int result;
4965
4966 if (clp->cl_owner_id != NULL)
4967 return strlcpy(buf, clp->cl_owner_id, len);
4968
4969 rcu_read_lock();
4970 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4971 clp->cl_ipaddr,
4972 rpc_peeraddr2str(clp->cl_rpcclient,
4973 RPC_DISPLAY_ADDR),
4974 rpc_peeraddr2str(clp->cl_rpcclient,
4975 RPC_DISPLAY_PROTO));
4976 rcu_read_unlock();
4977 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
4978 return result;
4979 }
4980
4981 static unsigned int
4982 nfs4_init_uniform_client_string(struct nfs_client *clp,
4983 char *buf, size_t len)
4984 {
4985 const char *nodename = clp->cl_rpcclient->cl_nodename;
4986 unsigned int result;
4987
4988 if (clp->cl_owner_id != NULL)
4989 return strlcpy(buf, clp->cl_owner_id, len);
4990
4991 if (nfs4_client_id_uniquifier[0] != '\0')
4992 result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
4993 clp->rpc_ops->version,
4994 clp->cl_minorversion,
4995 nfs4_client_id_uniquifier,
4996 nodename);
4997 else
4998 result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
4999 clp->rpc_ops->version, clp->cl_minorversion,
5000 nodename);
5001 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
5002 return result;
5003 }
5004
5005 /*
5006 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5007 * services. Advertise one based on the address family of the
5008 * clientaddr.
5009 */
5010 static unsigned int
5011 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5012 {
5013 if (strchr(clp->cl_ipaddr, ':') != NULL)
5014 return scnprintf(buf, len, "tcp6");
5015 else
5016 return scnprintf(buf, len, "tcp");
5017 }
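/*
 * Worked example of the rule above (addresses are illustrative): a
 * clientaddr of "192.0.2.1" contains no ':' and yields the netid "tcp",
 * while "2001:db8::1" yields "tcp6", matching the only callback
 * services that nfs4_callback_up_net() starts.
 */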
5018
5019 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5020 {
5021 struct nfs4_setclientid *sc = calldata;
5022
5023 if (task->tk_status == 0)
5024 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5025 }
5026
5027 static const struct rpc_call_ops nfs4_setclientid_ops = {
5028 .rpc_call_done = nfs4_setclientid_done,
5029 };
5030
5031 /**
5032 * nfs4_proc_setclientid - Negotiate client ID
5033 * @clp: state data structure
5034 * @program: RPC program for NFSv4 callback service
5035 * @port: IP port number for NFS4 callback service
5036 * @cred: RPC credential to use for this call
5037 * @res: where to place the result
5038 *
5039 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5040 */
5041 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5042 unsigned short port, struct rpc_cred *cred,
5043 struct nfs4_setclientid_res *res)
5044 {
5045 nfs4_verifier sc_verifier;
5046 struct nfs4_setclientid setclientid = {
5047 .sc_verifier = &sc_verifier,
5048 .sc_prog = program,
5049 .sc_cb_ident = clp->cl_cb_ident,
5050 };
5051 struct rpc_message msg = {
5052 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5053 .rpc_argp = &setclientid,
5054 .rpc_resp = res,
5055 .rpc_cred = cred,
5056 };
5057 struct rpc_task *task;
5058 struct rpc_task_setup task_setup_data = {
5059 .rpc_client = clp->cl_rpcclient,
5060 .rpc_message = &msg,
5061 .callback_ops = &nfs4_setclientid_ops,
5062 .callback_data = &setclientid,
5063 .flags = RPC_TASK_TIMEOUT,
5064 };
5065 int status;
5066
5067 /* nfs_client_id4 */
5068 nfs4_init_boot_verifier(clp, &sc_verifier);
5069 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5070 setclientid.sc_name_len =
5071 nfs4_init_uniform_client_string(clp,
5072 setclientid.sc_name,
5073 sizeof(setclientid.sc_name));
5074 else
5075 setclientid.sc_name_len =
5076 nfs4_init_nonuniform_client_string(clp,
5077 setclientid.sc_name,
5078 sizeof(setclientid.sc_name));
5079 /* cb_client4 */
5080 setclientid.sc_netid_len =
5081 nfs4_init_callback_netid(clp,
5082 setclientid.sc_netid,
5083 sizeof(setclientid.sc_netid));
5084 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5085 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5086 clp->cl_ipaddr, port >> 8, port & 255);
5087
5088 dprintk("NFS call setclientid auth=%s, '%.*s'\n",
5089 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5090 setclientid.sc_name_len, setclientid.sc_name);
5091 task = rpc_run_task(&task_setup_data);
5092 if (IS_ERR(task)) {
5093 status = PTR_ERR(task);
5094 goto out;
5095 }
5096 status = task->tk_status;
5097 if (setclientid.sc_cred) {
5098 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5099 put_rpccred(setclientid.sc_cred);
5100 }
5101 rpc_put_task(task);
5102 out:
5103 trace_nfs4_setclientid(clp, status);
5104 dprintk("NFS reply setclientid: %d\n", status);
5105 return status;
5106 }
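/*
 * A hedged sketch of how a caller is expected to drive the two-step
 * SETCLIENTID handshake; the real caller lives in the state manager
 * (nfs4state.c), and the names below are illustrative only:
 *
 *	struct nfs4_setclientid_res clid = { };
 *	int status;
 *
 *	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
 *	if (status == 0)
 *		status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
 *
 * Only once the confirm step succeeds is the negotiated client ID usable
 * for stateful operations such as OPEN and LOCK.
 */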
5107
5108 /**
5109 * nfs4_proc_setclientid_confirm - Confirm client ID
5110 * @clp: state data structure
5111 * @arg: result of a previous SETCLIENTID
5112 * @cred: RPC credential to use for this call
5113 *
5114 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5115 */
5116 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5117 struct nfs4_setclientid_res *arg,
5118 struct rpc_cred *cred)
5119 {
5120 struct rpc_message msg = {
5121 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5122 .rpc_argp = arg,
5123 .rpc_cred = cred,
5124 };
5125 int status;
5126
5127 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5128 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5129 clp->cl_clientid);
5130 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5131 trace_nfs4_setclientid_confirm(clp, status);
5132 dprintk("NFS reply setclientid_confirm: %d\n", status);
5133 return status;
5134 }
5135
5136 struct nfs4_delegreturndata {
5137 struct nfs4_delegreturnargs args;
5138 struct nfs4_delegreturnres res;
5139 struct nfs_fh fh;
5140 nfs4_stateid stateid;
5141 unsigned long timestamp;
5142 struct nfs_fattr fattr;
5143 int rpc_status;
5144 struct inode *inode;
5145 bool roc;
5146 u32 roc_barrier;
5147 };
5148
5149 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5150 {
5151 struct nfs4_delegreturndata *data = calldata;
5152
5153 if (!nfs4_sequence_done(task, &data->res.seq_res))
5154 return;
5155
5156 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5157 switch (task->tk_status) {
5158 case 0:
5159 renew_lease(data->res.server, data->timestamp);
5160 case -NFS4ERR_ADMIN_REVOKED:
5161 case -NFS4ERR_DELEG_REVOKED:
5162 case -NFS4ERR_BAD_STATEID:
5163 case -NFS4ERR_OLD_STATEID:
5164 case -NFS4ERR_STALE_STATEID:
5165 case -NFS4ERR_EXPIRED:
5166 task->tk_status = 0;
5167 if (data->roc)
5168 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5169 break;
5170 default:
5171 if (nfs4_async_handle_error(task, data->res.server,
5172 NULL, NULL) == -EAGAIN) {
5173 rpc_restart_call_prepare(task);
5174 return;
5175 }
5176 }
5177 data->rpc_status = task->tk_status;
5178 }
5179
5180 static void nfs4_delegreturn_release(void *calldata)
5181 {
5182 struct nfs4_delegreturndata *data = calldata;
5183 struct inode *inode = data->inode;
5184
5185 if (inode) {
5186 if (data->roc)
5187 pnfs_roc_release(inode);
5188 nfs_iput_and_deactive(inode);
5189 }
5190 kfree(calldata);
5191 }
5192
5193 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5194 {
5195 struct nfs4_delegreturndata *d_data;
5196
5197 d_data = (struct nfs4_delegreturndata *)data;
5198
5199 if (d_data->roc &&
5200 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task))
5201 return;
5202
5203 nfs4_setup_sequence(d_data->res.server,
5204 &d_data->args.seq_args,
5205 &d_data->res.seq_res,
5206 task);
5207 }
5208
5209 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5210 .rpc_call_prepare = nfs4_delegreturn_prepare,
5211 .rpc_call_done = nfs4_delegreturn_done,
5212 .rpc_release = nfs4_delegreturn_release,
5213 };
5214
5215 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5216 {
5217 struct nfs4_delegreturndata *data;
5218 struct nfs_server *server = NFS_SERVER(inode);
5219 struct rpc_task *task;
5220 struct rpc_message msg = {
5221 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5222 .rpc_cred = cred,
5223 };
5224 struct rpc_task_setup task_setup_data = {
5225 .rpc_client = server->client,
5226 .rpc_message = &msg,
5227 .callback_ops = &nfs4_delegreturn_ops,
5228 .flags = RPC_TASK_ASYNC,
5229 };
5230 int status = 0;
5231
5232 data = kzalloc(sizeof(*data), GFP_NOFS);
5233 if (data == NULL)
5234 return -ENOMEM;
5235 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5236 data->args.fhandle = &data->fh;
5237 data->args.stateid = &data->stateid;
5238 data->args.bitmask = server->cache_consistency_bitmask;
5239 nfs_copy_fh(&data->fh, NFS_FH(inode));
5240 nfs4_stateid_copy(&data->stateid, stateid);
5241 data->res.fattr = &data->fattr;
5242 data->res.server = server;
5243 nfs_fattr_init(data->res.fattr);
5244 data->timestamp = jiffies;
5245 data->rpc_status = 0;
5246 data->inode = nfs_igrab_and_active(inode);
5247 if (data->inode)
5248 data->roc = nfs4_roc(inode);
5249
5250 task_setup_data.callback_data = data;
5251 msg.rpc_argp = &data->args;
5252 msg.rpc_resp = &data->res;
5253 task = rpc_run_task(&task_setup_data);
5254 if (IS_ERR(task))
5255 return PTR_ERR(task);
5256 if (!issync)
5257 goto out;
5258 status = nfs4_wait_for_completion_rpc_task(task);
5259 if (status != 0)
5260 goto out;
5261 status = data->rpc_status;
5262 if (status == 0)
5263 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5264 else
5265 nfs_refresh_inode(inode, &data->fattr);
5266 out:
5267 rpc_put_task(task);
5268 return status;
5269 }
5270
5271 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5272 {
5273 struct nfs_server *server = NFS_SERVER(inode);
5274 struct nfs4_exception exception = { };
5275 int err;
5276 do {
5277 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5278 trace_nfs4_delegreturn(inode, err);
5279 switch (err) {
5280 case -NFS4ERR_STALE_STATEID:
5281 case -NFS4ERR_EXPIRED:
5282 case 0:
5283 return 0;
5284 }
5285 err = nfs4_handle_exception(server, err, &exception);
5286 } while (exception.retry);
5287 return err;
5288 }
5289
5290 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5291 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5292
5293 /*
5294 * sleep, with exponential backoff, and retry the LOCK operation.
5295 */
5296 static unsigned long
5297 nfs4_set_lock_task_retry(unsigned long timeout)
5298 {
5299 freezable_schedule_timeout_killable_unsafe(timeout);
5300 timeout <<= 1;
5301 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5302 return NFS4_LOCK_MAXTIMEOUT;
5303 return timeout;
5304 }
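/*
 * Illustrative progression of the backoff above, assuming the caller
 * starts from NFS4_LOCK_MINTIMEOUT: successive waits are 1s, 2s, 4s, 8s
 * and 16s, after which the doubled value exceeds NFS4_LOCK_MAXTIMEOUT
 * and every further retry waits the clamped maximum of 30s.
 */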
5305
5306 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5307 {
5308 struct inode *inode = state->inode;
5309 struct nfs_server *server = NFS_SERVER(inode);
5310 struct nfs_client *clp = server->nfs_client;
5311 struct nfs_lockt_args arg = {
5312 .fh = NFS_FH(inode),
5313 .fl = request,
5314 };
5315 struct nfs_lockt_res res = {
5316 .denied = request,
5317 };
5318 struct rpc_message msg = {
5319 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5320 .rpc_argp = &arg,
5321 .rpc_resp = &res,
5322 .rpc_cred = state->owner->so_cred,
5323 };
5324 struct nfs4_lock_state *lsp;
5325 int status;
5326
5327 arg.lock_owner.clientid = clp->cl_clientid;
5328 status = nfs4_set_lock_state(state, request);
5329 if (status != 0)
5330 goto out;
5331 lsp = request->fl_u.nfs4_fl.owner;
5332 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5333 arg.lock_owner.s_dev = server->s_dev;
5334 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5335 switch (status) {
5336 case 0:
5337 request->fl_type = F_UNLCK;
5338 break;
5339 case -NFS4ERR_DENIED:
5340 status = 0;
5341 }
5342 request->fl_ops->fl_release_private(request);
5343 request->fl_ops = NULL;
5344 out:
5345 return status;
5346 }
5347
5348 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5349 {
5350 struct nfs4_exception exception = { };
5351 int err;
5352
5353 do {
5354 err = _nfs4_proc_getlk(state, cmd, request);
5355 trace_nfs4_get_lock(request, state, cmd, err);
5356 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5357 &exception);
5358 } while (exception.retry);
5359 return err;
5360 }
5361
5362 static int do_vfs_lock(struct file *file, struct file_lock *fl)
5363 {
5364 int res = 0;
5365 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5366 case FL_POSIX:
5367 res = posix_lock_file_wait(file, fl);
5368 break;
5369 case FL_FLOCK:
5370 res = flock_lock_file_wait(file, fl);
5371 break;
5372 default:
5373 BUG();
5374 }
5375 return res;
5376 }
5377
5378 struct nfs4_unlockdata {
5379 struct nfs_locku_args arg;
5380 struct nfs_locku_res res;
5381 struct nfs4_lock_state *lsp;
5382 struct nfs_open_context *ctx;
5383 struct file_lock fl;
5384 const struct nfs_server *server;
5385 unsigned long timestamp;
5386 };
5387
5388 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5389 struct nfs_open_context *ctx,
5390 struct nfs4_lock_state *lsp,
5391 struct nfs_seqid *seqid)
5392 {
5393 struct nfs4_unlockdata *p;
5394 struct inode *inode = lsp->ls_state->inode;
5395
5396 p = kzalloc(sizeof(*p), GFP_NOFS);
5397 if (p == NULL)
5398 return NULL;
5399 p->arg.fh = NFS_FH(inode);
5400 p->arg.fl = &p->fl;
5401 p->arg.seqid = seqid;
5402 p->res.seqid = seqid;
5403 p->lsp = lsp;
5404 atomic_inc(&lsp->ls_count);
5405 /* Ensure we don't close file until we're done freeing locks! */
5406 p->ctx = get_nfs_open_context(ctx);
5407 memcpy(&p->fl, fl, sizeof(p->fl));
5408 p->server = NFS_SERVER(inode);
5409 return p;
5410 }
5411
5412 static void nfs4_locku_release_calldata(void *data)
5413 {
5414 struct nfs4_unlockdata *calldata = data;
5415 nfs_free_seqid(calldata->arg.seqid);
5416 nfs4_put_lock_state(calldata->lsp);
5417 put_nfs_open_context(calldata->ctx);
5418 kfree(calldata);
5419 }
5420
5421 static void nfs4_locku_done(struct rpc_task *task, void *data)
5422 {
5423 struct nfs4_unlockdata *calldata = data;
5424
5425 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5426 return;
5427 switch (task->tk_status) {
5428 case 0:
5429 renew_lease(calldata->server, calldata->timestamp);
5430 do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
5431 if (nfs4_update_lock_stateid(calldata->lsp,
5432 &calldata->res.stateid))
5433 break;
5434 case -NFS4ERR_BAD_STATEID:
5435 case -NFS4ERR_OLD_STATEID:
5436 case -NFS4ERR_STALE_STATEID:
5437 case -NFS4ERR_EXPIRED:
5438 if (!nfs4_stateid_match(&calldata->arg.stateid,
5439 &calldata->lsp->ls_stateid))
5440 rpc_restart_call_prepare(task);
5441 break;
5442 default:
5443 if (nfs4_async_handle_error(task, calldata->server,
5444 NULL, NULL) == -EAGAIN)
5445 rpc_restart_call_prepare(task);
5446 }
5447 nfs_release_seqid(calldata->arg.seqid);
5448 }
5449
5450 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5451 {
5452 struct nfs4_unlockdata *calldata = data;
5453
5454 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5455 goto out_wait;
5456 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5457 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5458 /* Note: exit _without_ running nfs4_locku_done */
5459 goto out_no_action;
5460 }
5461 calldata->timestamp = jiffies;
5462 if (nfs4_setup_sequence(calldata->server,
5463 &calldata->arg.seq_args,
5464 &calldata->res.seq_res,
5465 task) != 0)
5466 nfs_release_seqid(calldata->arg.seqid);
5467 return;
5468 out_no_action:
5469 task->tk_action = NULL;
5470 out_wait:
5471 nfs4_sequence_done(task, &calldata->res.seq_res);
5472 }
5473
5474 static const struct rpc_call_ops nfs4_locku_ops = {
5475 .rpc_call_prepare = nfs4_locku_prepare,
5476 .rpc_call_done = nfs4_locku_done,
5477 .rpc_release = nfs4_locku_release_calldata,
5478 };
5479
5480 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5481 struct nfs_open_context *ctx,
5482 struct nfs4_lock_state *lsp,
5483 struct nfs_seqid *seqid)
5484 {
5485 struct nfs4_unlockdata *data;
5486 struct rpc_message msg = {
5487 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5488 .rpc_cred = ctx->cred,
5489 };
5490 struct rpc_task_setup task_setup_data = {
5491 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5492 .rpc_message = &msg,
5493 .callback_ops = &nfs4_locku_ops,
5494 .workqueue = nfsiod_workqueue,
5495 .flags = RPC_TASK_ASYNC,
5496 };
5497
5498 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5499 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5500
5501 /* Ensure this is an unlock - when canceling a lock, the
5502 * canceled lock is passed in, and it won't be an unlock.
5503 */
5504 fl->fl_type = F_UNLCK;
5505
5506 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5507 if (data == NULL) {
5508 nfs_free_seqid(seqid);
5509 return ERR_PTR(-ENOMEM);
5510 }
5511
5512 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5513 msg.rpc_argp = &data->arg;
5514 msg.rpc_resp = &data->res;
5515 task_setup_data.callback_data = data;
5516 return rpc_run_task(&task_setup_data);
5517 }
5518
5519 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5520 {
5521 struct inode *inode = state->inode;
5522 struct nfs4_state_owner *sp = state->owner;
5523 struct nfs_inode *nfsi = NFS_I(inode);
5524 struct nfs_seqid *seqid;
5525 struct nfs4_lock_state *lsp;
5526 struct rpc_task *task;
5527 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5528 int status = 0;
5529 unsigned char fl_flags = request->fl_flags;
5530
5531 status = nfs4_set_lock_state(state, request);
5532 /* Unlock _before_ we do the RPC call */
5533 request->fl_flags |= FL_EXISTS;
5534 /* Exclude nfs_delegation_claim_locks() */
5535 mutex_lock(&sp->so_delegreturn_mutex);
5536 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5537 down_read(&nfsi->rwsem);
5538 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
5539 up_read(&nfsi->rwsem);
5540 mutex_unlock(&sp->so_delegreturn_mutex);
5541 goto out;
5542 }
5543 up_read(&nfsi->rwsem);
5544 mutex_unlock(&sp->so_delegreturn_mutex);
5545 if (status != 0)
5546 goto out;
5547 /* Is this a delegated lock? */
5548 lsp = request->fl_u.nfs4_fl.owner;
5549 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5550 goto out;
5551 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5552 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5553 status = -ENOMEM;
5554 if (IS_ERR(seqid))
5555 goto out;
5556 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5557 status = PTR_ERR(task);
5558 if (IS_ERR(task))
5559 goto out;
5560 status = nfs4_wait_for_completion_rpc_task(task);
5561 rpc_put_task(task);
5562 out:
5563 request->fl_flags = fl_flags;
5564 trace_nfs4_unlock(request, state, F_SETLK, status);
5565 return status;
5566 }
5567
5568 struct nfs4_lockdata {
5569 struct nfs_lock_args arg;
5570 struct nfs_lock_res res;
5571 struct nfs4_lock_state *lsp;
5572 struct nfs_open_context *ctx;
5573 struct file_lock fl;
5574 unsigned long timestamp;
5575 int rpc_status;
5576 int cancelled;
5577 struct nfs_server *server;
5578 };
5579
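/*
 * Allocate the per-LOCK call data.  Both an open seqid (for the
 * open_to_lock_owner case) and a lock seqid are allocated here; whether the
 * open seqid is actually needed is decided later in nfs4_lock_prepare(),
 * depending on whether the lock owner is already established.
 */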
5580 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5581 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5582 gfp_t gfp_mask)
5583 {
5584 struct nfs4_lockdata *p;
5585 struct inode *inode = lsp->ls_state->inode;
5586 struct nfs_server *server = NFS_SERVER(inode);
5587 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5588
5589 p = kzalloc(sizeof(*p), gfp_mask);
5590 if (p == NULL)
5591 return NULL;
5592
5593 p->arg.fh = NFS_FH(inode);
5594 p->arg.fl = &p->fl;
5595 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5596 if (IS_ERR(p->arg.open_seqid))
5597 goto out_free;
5598 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5599 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5600 if (IS_ERR(p->arg.lock_seqid))
5601 goto out_free_seqid;
5602 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5603 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5604 p->arg.lock_owner.s_dev = server->s_dev;
5605 p->res.lock_seqid = p->arg.lock_seqid;
5606 p->lsp = lsp;
5607 p->server = server;
5608 atomic_inc(&lsp->ls_count);
5609 p->ctx = get_nfs_open_context(ctx);
5610 memcpy(&p->fl, fl, sizeof(p->fl));
5611 return p;
5612 out_free_seqid:
5613 nfs_free_seqid(p->arg.open_seqid);
5614 out_free:
5615 kfree(p);
5616 return NULL;
5617 }
5618
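/*
 * LOCK rpc_call_prepare: if this lock owner has no lock stateid yet
 * (NFS_LOCK_INITIALIZED clear), send the request as open_to_lock_owner,
 * carrying the open stateid and open seqid; otherwise use the existing lock
 * owner with the current lock stateid.  -EBADF is reported if the open
 * stateid is no longer valid, and unused seqids are released on the error
 * paths.
 */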
5619 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5620 {
5621 struct nfs4_lockdata *data = calldata;
5622 struct nfs4_state *state = data->lsp->ls_state;
5623
5624 dprintk("%s: begin!\n", __func__);
5625 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5626 goto out_wait;
5627 /* Do we need to do an open_to_lock_owner? */
5628 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5629 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5630 goto out_release_lock_seqid;
5631 }
5632 nfs4_stateid_copy(&data->arg.open_stateid,
5633 &state->open_stateid);
5634 data->arg.new_lock_owner = 1;
5635 data->res.open_seqid = data->arg.open_seqid;
5636 } else {
5637 data->arg.new_lock_owner = 0;
5638 nfs4_stateid_copy(&data->arg.lock_stateid,
5639 &data->lsp->ls_stateid);
5640 }
5641 if (!nfs4_valid_open_stateid(state)) {
5642 data->rpc_status = -EBADF;
5643 task->tk_action = NULL;
5644 goto out_release_open_seqid;
5645 }
5646 data->timestamp = jiffies;
5647 if (nfs4_setup_sequence(data->server,
5648 &data->arg.seq_args,
5649 &data->res.seq_res,
5650 task) == 0)
5651 return;
5652 out_release_open_seqid:
5653 nfs_release_seqid(data->arg.open_seqid);
5654 out_release_lock_seqid:
5655 nfs_release_seqid(data->arg.lock_seqid);
5656 out_wait:
5657 nfs4_sequence_done(task, &data->res.seq_res);
5658 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5659 }
5660
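/*
 * LOCK completion.  On success the lock is applied locally (for new,
 * non-recovery locks) and the returned lock stateid is recorded; if the
 * stateid cannot be updated, or a stateid error shows that our cached
 * stateid no longer matches the one the request carried, the call is
 * restarted from rpc_call_prepare.
 */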
5661 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5662 {
5663 struct nfs4_lockdata *data = calldata;
5664 struct nfs4_lock_state *lsp = data->lsp;
5665
5666 dprintk("%s: begin!\n", __func__);
5667
5668 if (!nfs4_sequence_done(task, &data->res.seq_res))
5669 return;
5670
5671 data->rpc_status = task->tk_status;
5672 switch (task->tk_status) {
5673 case 0:
5674 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode),
5675 data->timestamp);
5676 if (data->arg.new_lock) {
5677 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5678 if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
5679 rpc_restart_call_prepare(task);
5680 break;
5681 }
5682 }
5683 if (data->arg.new_lock_owner != 0) {
5684 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5685 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5686 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5687 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5688 rpc_restart_call_prepare(task);
5689 break;
5690 case -NFS4ERR_BAD_STATEID:
5691 case -NFS4ERR_OLD_STATEID:
5692 case -NFS4ERR_STALE_STATEID:
5693 case -NFS4ERR_EXPIRED:
5694 if (data->arg.new_lock_owner != 0) {
5695 if (!nfs4_stateid_match(&data->arg.open_stateid,
5696 &lsp->ls_state->open_stateid))
5697 rpc_restart_call_prepare(task);
5698 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5699 &lsp->ls_stateid))
5700 rpc_restart_call_prepare(task);
5701 }
5702 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5703 }
5704
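/*
 * Release callback for the LOCK call.  If the caller gave up waiting
 * (data->cancelled), the server may still have granted the lock, so a LOCKU
 * is fired off here, reusing the lock seqid; otherwise the lock seqid is
 * simply freed.
 */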
5705 static void nfs4_lock_release(void *calldata)
5706 {
5707 struct nfs4_lockdata *data = calldata;
5708
5709 dprintk("%s: begin!\n", __func__);
5710 nfs_free_seqid(data->arg.open_seqid);
5711 if (data->cancelled != 0) {
5712 struct rpc_task *task;
5713 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5714 data->arg.lock_seqid);
5715 if (!IS_ERR(task))
5716 rpc_put_task_async(task);
5717 dprintk("%s: cancelling lock!\n", __func__);
5718 } else
5719 nfs_free_seqid(data->arg.lock_seqid);
5720 nfs4_put_lock_state(data->lsp);
5721 put_nfs_open_context(data->ctx);
5722 kfree(data);
5723 dprintk("%s: done!\n", __func__);
5724 }
5725
5726 static const struct rpc_call_ops nfs4_lock_ops = {
5727 .rpc_call_prepare = nfs4_lock_prepare,
5728 .rpc_call_done = nfs4_lock_done,
5729 .rpc_release = nfs4_lock_release,
5730 };
5731
5732 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5733 {
5734 switch (error) {
5735 case -NFS4ERR_ADMIN_REVOKED:
5736 case -NFS4ERR_BAD_STATEID:
5737 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5738 if (new_lock_owner != 0 ||
5739 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5740 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5741 break;
5742 case -NFS4ERR_STALE_STATEID:
5743 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5744 case -NFS4ERR_EXPIRED:
5745 nfs4_schedule_lease_recovery(server->nfs_client);
5746 }
5747 }
5748
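/*
 * Send a LOCK request and wait for it to complete.  recovery_type selects
 * the GFP flags and sequence privileges: NFS_LOCK_NEW marks a normal lock
 * (new_lock = 1), while NFS_LOCK_RECLAIM / NFS_LOCK_EXPIRED run as
 * privileged operations during state recovery, with the reclaim flag set
 * for the former.
 */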
5749 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5750 {
5751 struct nfs4_lockdata *data;
5752 struct rpc_task *task;
5753 struct rpc_message msg = {
5754 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5755 .rpc_cred = state->owner->so_cred,
5756 };
5757 struct rpc_task_setup task_setup_data = {
5758 .rpc_client = NFS_CLIENT(state->inode),
5759 .rpc_message = &msg,
5760 .callback_ops = &nfs4_lock_ops,
5761 .workqueue = nfsiod_workqueue,
5762 .flags = RPC_TASK_ASYNC,
5763 };
5764 int ret;
5765
5766 dprintk("%s: begin!\n", __func__);
5767 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5768 fl->fl_u.nfs4_fl.owner,
5769 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5770 if (data == NULL)
5771 return -ENOMEM;
5772 if (IS_SETLKW(cmd))
5773 data->arg.block = 1;
5774 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5775 msg.rpc_argp = &data->arg;
5776 msg.rpc_resp = &data->res;
5777 task_setup_data.callback_data = data;
5778 if (recovery_type > NFS_LOCK_NEW) {
5779 if (recovery_type == NFS_LOCK_RECLAIM)
5780 data->arg.reclaim = NFS_LOCK_RECLAIM;
5781 nfs4_set_sequence_privileged(&data->arg.seq_args);
5782 } else
5783 data->arg.new_lock = 1;
5784 task = rpc_run_task(&task_setup_data);
5785 if (IS_ERR(task))
5786 return PTR_ERR(task);
5787 ret = nfs4_wait_for_completion_rpc_task(task);
5788 if (ret == 0) {
5789 ret = data->rpc_status;
5790 if (ret)
5791 nfs4_handle_setlk_error(data->server, data->lsp,
5792 data->arg.new_lock_owner, ret);
5793 } else
5794 data->cancelled = 1;
5795 rpc_put_task(task);
5796 dprintk("%s: done, ret = %d!\n", __func__, ret);
5797 return ret;
5798 }
5799
5800 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5801 {
5802 struct nfs_server *server = NFS_SERVER(state->inode);
5803 struct nfs4_exception exception = {
5804 .inode = state->inode,
5805 };
5806 int err;
5807
5808 do {
5809 /* Cache the lock if possible... */
5810 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5811 return 0;
5812 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5813 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5814 if (err != -NFS4ERR_DELAY)
5815 break;
5816 nfs4_handle_exception(server, err, &exception);
5817 } while (exception.retry);
5818 return err;
5819 }
5820
5821 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5822 {
5823 struct nfs_server *server = NFS_SERVER(state->inode);
5824 struct nfs4_exception exception = {
5825 .inode = state->inode,
5826 };
5827 int err;
5828
5829 err = nfs4_set_lock_state(state, request);
5830 if (err != 0)
5831 return err;
5832 if (!recover_lost_locks) {
5833 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5834 return 0;
5835 }
5836 do {
5837 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5838 return 0;
5839 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5840 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5841 switch (err) {
5842 default:
5843 goto out;
5844 case -NFS4ERR_GRACE:
5845 case -NFS4ERR_DELAY:
5846 nfs4_handle_exception(server, err, &exception);
5847 err = 0;
5848 }
5849 } while (exception.retry);
5850 out:
5851 return err;
5852 }
5853
5854 #if defined(CONFIG_NFS_V4_1)
5855 /**
5856 * nfs41_check_expired_locks - possibly free a lock stateid
5857 *
5858 * @state: NFSv4 state for an inode
5859 *
5860 * Returns NFS_OK if recovery for this stateid is now finished.
5861 * Otherwise a negative NFS4ERR value is returned.
5862 */
5863 static int nfs41_check_expired_locks(struct nfs4_state *state)
5864 {
5865 int status, ret = -NFS4ERR_BAD_STATEID;
5866 struct nfs4_lock_state *lsp;
5867 struct nfs_server *server = NFS_SERVER(state->inode);
5868
5869 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5870 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5871 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
5872
5873 status = nfs41_test_stateid(server,
5874 &lsp->ls_stateid,
5875 cred);
5876 trace_nfs4_test_lock_stateid(state, lsp, status);
5877 if (status != NFS_OK) {
5878 /* Free the stateid unless the server
5879 * informs us the stateid is unrecognized. */
5880 if (status != -NFS4ERR_BAD_STATEID)
5881 nfs41_free_stateid(server,
5882 &lsp->ls_stateid,
5883 cred);
5884 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5885 ret = status;
5886 }
5887 }
5888 }
5889
5890 return ret;
5891 }
5892
5893 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5894 {
5895 int status = NFS_OK;
5896
5897 if (test_bit(LK_STATE_IN_USE, &state->flags))
5898 status = nfs41_check_expired_locks(state);
5899 if (status != NFS_OK)
5900 status = nfs4_lock_expired(state, request);
5901 return status;
5902 }
5903 #endif
5904
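/*
 * A lock request against a delegated open is only recorded locally (lock
 * caching); the FL_ACCESS probe and nfsi->rwsem guard against racing with a
 * delegation recall, which would require the lock to go on the wire.
 */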
5905 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5906 {
5907 struct nfs_inode *nfsi = NFS_I(state->inode);
5908 unsigned char fl_flags = request->fl_flags;
5909 int status = -ENOLCK;
5910
5911 if ((fl_flags & FL_POSIX) &&
5912 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
5913 goto out;
5914 /* Is this a delegated open? */
5915 status = nfs4_set_lock_state(state, request);
5916 if (status != 0)
5917 goto out;
5918 request->fl_flags |= FL_ACCESS;
5919 status = do_vfs_lock(request->fl_file, request);
5920 if (status < 0)
5921 goto out;
5922 down_read(&nfsi->rwsem);
5923 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
5924 /* Yes: cache locks! */
5925 /* ...but avoid races with delegation recall... */
5926 request->fl_flags = fl_flags & ~FL_SLEEP;
5927 status = do_vfs_lock(request->fl_file, request);
5928 up_read(&nfsi->rwsem);
5929 goto out;
5930 }
5931 up_read(&nfsi->rwsem);
5932 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
5933 out:
5934 request->fl_flags = fl_flags;
5935 return status;
5936 }
5937
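/*
 * NFS4ERR_DENIED is translated to -EAGAIN here so that the retry loop in
 * nfs4_proc_lock() can poll again for blocking (SETLKW) requests instead of
 * treating a currently-held conflicting lock as a fatal error.
 */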
5938 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5939 {
5940 struct nfs4_exception exception = {
5941 .state = state,
5942 .inode = state->inode,
5943 };
5944 int err;
5945
5946 do {
5947 err = _nfs4_proc_setlk(state, cmd, request);
5948 trace_nfs4_set_lock(request, state, cmd, err);
5949 if (err == -NFS4ERR_DENIED)
5950 err = -EAGAIN;
5951 err = nfs4_handle_exception(NFS_SERVER(state->inode),
5952 err, &exception);
5953 } while (exception.retry);
5954 return err;
5955 }
5956
5957 static int
5958 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
5959 {
5960 struct nfs_open_context *ctx;
5961 struct nfs4_state *state;
5962 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
5963 int status;
5964
5965 /* verify open state */
5966 ctx = nfs_file_open_context(filp);
5967 state = ctx->state;
5968
5969 if (request->fl_start < 0 || request->fl_end < 0)
5970 return -EINVAL;
5971
5972 if (IS_GETLK(cmd)) {
5973 if (state != NULL)
5974 return nfs4_proc_getlk(state, F_GETLK, request);
5975 return 0;
5976 }
5977
5978 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
5979 return -EINVAL;
5980
5981 if (request->fl_type == F_UNLCK) {
5982 if (state != NULL)
5983 return nfs4_proc_unlck(state, cmd, request);
5984 return 0;
5985 }
5986
5987 if (state == NULL)
5988 return -ENOLCK;
5989 /*
5990 * Don't rely on the VFS having checked the file open mode,
5991 * since it won't do this for flock() locks.
5992 */
5993 switch (request->fl_type) {
5994 case F_RDLCK:
5995 if (!(filp->f_mode & FMODE_READ))
5996 return -EBADF;
5997 break;
5998 case F_WRLCK:
5999 if (!(filp->f_mode & FMODE_WRITE))
6000 return -EBADF;
6001 }
6002
6003 do {
6004 status = nfs4_proc_setlk(state, cmd, request);
6005 if ((status != -EAGAIN) || IS_SETLK(cmd))
6006 break;
6007 timeout = nfs4_set_lock_task_retry(timeout);
6008 status = -ERESTARTSYS;
6009 if (signalled())
6010 break;
6011 } while(status < 0);
6012 return status;
6013 }
6014
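/*
 * Called while a delegation is being recalled or returned: locks that were
 * only cached locally under the delegation are now pushed to the server as
 * ordinary LOCK requests.
 */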
6015 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6016 {
6017 struct nfs_server *server = NFS_SERVER(state->inode);
6018 int err;
6019
6020 err = nfs4_set_lock_state(state, fl);
6021 if (err != 0)
6022 return err;
6023 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6024 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6025 }
6026
6027 struct nfs_release_lockowner_data {
6028 struct nfs4_lock_state *lsp;
6029 struct nfs_server *server;
6030 struct nfs_release_lockowner_args args;
6031 struct nfs_release_lockowner_res res;
6032 unsigned long timestamp;
6033 };
6034
6035 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6036 {
6037 struct nfs_release_lockowner_data *data = calldata;
6038 struct nfs_server *server = data->server;
6039 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6040 &data->args.seq_args, &data->res.seq_res, task);
6041 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6042 data->timestamp = jiffies;
6043 }
6044
6045 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6046 {
6047 struct nfs_release_lockowner_data *data = calldata;
6048 struct nfs_server *server = data->server;
6049
6050 nfs40_sequence_done(task, &data->res.seq_res);
6051
6052 switch (task->tk_status) {
6053 case 0:
6054 renew_lease(server, data->timestamp);
6055 break;
6056 case -NFS4ERR_STALE_CLIENTID:
6057 case -NFS4ERR_EXPIRED:
6058 nfs4_schedule_lease_recovery(server->nfs_client);
6059 break;
6060 case -NFS4ERR_LEASE_MOVED:
6061 case -NFS4ERR_DELAY:
6062 if (nfs4_async_handle_error(task, server,
6063 NULL, NULL) == -EAGAIN)
6064 rpc_restart_call_prepare(task);
6065 }
6066 }
6067
6068 static void nfs4_release_lockowner_release(void *calldata)
6069 {
6070 struct nfs_release_lockowner_data *data = calldata;
6071 nfs4_free_lock_state(data->server, data->lsp);
6072 kfree(calldata);
6073 }
6074
6075 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6076 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6077 .rpc_call_done = nfs4_release_lockowner_done,
6078 .rpc_release = nfs4_release_lockowner_release,
6079 };
6080
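/*
 * RELEASE_LOCKOWNER is an NFSv4.0-only operation, hence the minor version
 * check below.  The call is fire-and-forget: the lock state is freed in the
 * rpc_release callback whether or not the RPC itself succeeds.
 */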
6081 static void
6082 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6083 {
6084 struct nfs_release_lockowner_data *data;
6085 struct rpc_message msg = {
6086 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6087 };
6088
6089 if (server->nfs_client->cl_mvops->minor_version != 0)
6090 return;
6091
6092 data = kmalloc(sizeof(*data), GFP_NOFS);
6093 if (!data)
6094 return;
6095 data->lsp = lsp;
6096 data->server = server;
6097 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6098 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6099 data->args.lock_owner.s_dev = server->s_dev;
6100
6101 msg.rpc_argp = &data->args;
6102 msg.rpc_resp = &data->res;
6103 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6104 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6105 }
6106
6107 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6108
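/*
 * These handlers back the "system.nfs4_acl" extended attribute.  From user
 * space the raw NFSv4 ACL is read and written with the ordinary xattr
 * calls, e.g. (sketch):
 *
 *	getxattr(path, "system.nfs4_acl", buf, buflen);
 *	setxattr(path, "system.nfs4_acl", buf, buflen, 0);
 *
 * The strcmp(key, "") checks below accept only the exact attribute name,
 * since the handler prefix has already been stripped.
 */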
6109 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
6110 const void *buf, size_t buflen,
6111 int flags, int type)
6112 {
6113 if (strcmp(key, "") != 0)
6114 return -EINVAL;
6115
6116 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
6117 }
6118
6119 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
6120 void *buf, size_t buflen, int type)
6121 {
6122 if (strcmp(key, "") != 0)
6123 return -EINVAL;
6124
6125 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
6126 }
6127
6128 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
6129 size_t list_len, const char *name,
6130 size_t name_len, int type)
6131 {
6132 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6133
6134 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
6135 return 0;
6136
6137 if (list && len <= list_len)
6138 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6139 return len;
6140 }
6141
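/*
 * Security label xattr handlers: "security.*" attributes that the LSM
 * recognises as its MAC label (e.g. "security.selinux" under SELinux) are
 * mapped onto the NFSv4 security_label attribute; anything else under the
 * security. prefix is rejected with -EOPNOTSUPP.
 */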
6142 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6143 static inline int nfs4_server_supports_labels(struct nfs_server *server)
6144 {
6145 return server->caps & NFS_CAP_SECURITY_LABEL;
6146 }
6147
6148 static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key,
6149 const void *buf, size_t buflen,
6150 int flags, int type)
6151 {
6152 if (security_ismaclabel(key))
6153 return nfs4_set_security_label(dentry, buf, buflen);
6154
6155 return -EOPNOTSUPP;
6156 }
6157
6158 static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
6159 void *buf, size_t buflen, int type)
6160 {
6161 if (security_ismaclabel(key))
6162 return nfs4_get_security_label(dentry->d_inode, buf, buflen);
6163 return -EOPNOTSUPP;
6164 }
6165
6166 static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
6167 size_t list_len, const char *name,
6168 size_t name_len, int type)
6169 {
6170 size_t len = 0;
6171
6172 if (nfs_server_capable(dentry->d_inode, NFS_CAP_SECURITY_LABEL)) {
6173 len = security_inode_listsecurity(dentry->d_inode, NULL, 0);
6174 if (list && len <= list_len)
6175 security_inode_listsecurity(dentry->d_inode, list, len);
6176 }
6177 return len;
6178 }
6179
6180 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6181 .prefix = XATTR_SECURITY_PREFIX,
6182 .list = nfs4_xattr_list_nfs4_label,
6183 .get = nfs4_xattr_get_nfs4_label,
6184 .set = nfs4_xattr_set_nfs4_label,
6185 };
6186 #endif
6187
6188
6189 /*
6190 * nfs_fhget will use either the mounted_on_fileid or the fileid
6191 */
6192 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6193 {
6194 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6195 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6196 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6197 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6198 return;
6199
6200 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6201 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6202 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6203 fattr->nlink = 2;
6204 }
6205
6206 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6207 const struct qstr *name,
6208 struct nfs4_fs_locations *fs_locations,
6209 struct page *page)
6210 {
6211 struct nfs_server *server = NFS_SERVER(dir);
6212 u32 bitmask[3] = {
6213 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6214 };
6215 struct nfs4_fs_locations_arg args = {
6216 .dir_fh = NFS_FH(dir),
6217 .name = name,
6218 .page = page,
6219 .bitmask = bitmask,
6220 };
6221 struct nfs4_fs_locations_res res = {
6222 .fs_locations = fs_locations,
6223 };
6224 struct rpc_message msg = {
6225 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6226 .rpc_argp = &args,
6227 .rpc_resp = &res,
6228 };
6229 int status;
6230
6231 dprintk("%s: start\n", __func__);
6232
6233 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6234 * is not supported */
6235 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6236 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6237 else
6238 bitmask[0] |= FATTR4_WORD0_FILEID;
6239
6240 nfs_fattr_init(&fs_locations->fattr);
6241 fs_locations->server = server;
6242 fs_locations->nlocations = 0;
6243 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6244 dprintk("%s: returned status = %d\n", __func__, status);
6245 return status;
6246 }
6247
6248 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6249 const struct qstr *name,
6250 struct nfs4_fs_locations *fs_locations,
6251 struct page *page)
6252 {
6253 struct nfs4_exception exception = { };
6254 int err;
6255 do {
6256 err = _nfs4_proc_fs_locations(client, dir, name,
6257 fs_locations, page);
6258 trace_nfs4_get_fs_locations(dir, name, err);
6259 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6260 &exception);
6261 } while (exception.retry);
6262 return err;
6263 }
6264
6265 /*
6266 * This operation also signals the server that this client is
6267 * performing migration recovery. The server can stop returning
6268 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6269 * appended to this compound to identify the client ID which is
6270 * performing recovery.
6271 */
6272 static int _nfs40_proc_get_locations(struct inode *inode,
6273 struct nfs4_fs_locations *locations,
6274 struct page *page, struct rpc_cred *cred)
6275 {
6276 struct nfs_server *server = NFS_SERVER(inode);
6277 struct rpc_clnt *clnt = server->client;
6278 u32 bitmask[2] = {
6279 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6280 };
6281 struct nfs4_fs_locations_arg args = {
6282 .clientid = server->nfs_client->cl_clientid,
6283 .fh = NFS_FH(inode),
6284 .page = page,
6285 .bitmask = bitmask,
6286 .migration = 1, /* skip LOOKUP */
6287 .renew = 1, /* append RENEW */
6288 };
6289 struct nfs4_fs_locations_res res = {
6290 .fs_locations = locations,
6291 .migration = 1,
6292 .renew = 1,
6293 };
6294 struct rpc_message msg = {
6295 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6296 .rpc_argp = &args,
6297 .rpc_resp = &res,
6298 .rpc_cred = cred,
6299 };
6300 unsigned long now = jiffies;
6301 int status;
6302
6303 nfs_fattr_init(&locations->fattr);
6304 locations->server = server;
6305 locations->nlocations = 0;
6306
6307 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6308 nfs4_set_sequence_privileged(&args.seq_args);
6309 status = nfs4_call_sync_sequence(clnt, server, &msg,
6310 &args.seq_args, &res.seq_res);
6311 if (status)
6312 return status;
6313
6314 renew_lease(server, now);
6315 return 0;
6316 }
6317
6318 #ifdef CONFIG_NFS_V4_1
6319
6320 /*
6321 * This operation also signals the server that this client is
6322 * performing migration recovery. The server can stop asserting
6323 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6324 * performing this operation is identified in the SEQUENCE
6325 * operation in this compound.
6326 *
6327 * When the client supports GETATTR(fs_locations_info), it can
6328 * be plumbed in here.
6329 */
6330 static int _nfs41_proc_get_locations(struct inode *inode,
6331 struct nfs4_fs_locations *locations,
6332 struct page *page, struct rpc_cred *cred)
6333 {
6334 struct nfs_server *server = NFS_SERVER(inode);
6335 struct rpc_clnt *clnt = server->client;
6336 u32 bitmask[2] = {
6337 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6338 };
6339 struct nfs4_fs_locations_arg args = {
6340 .fh = NFS_FH(inode),
6341 .page = page,
6342 .bitmask = bitmask,
6343 .migration = 1, /* skip LOOKUP */
6344 };
6345 struct nfs4_fs_locations_res res = {
6346 .fs_locations = locations,
6347 .migration = 1,
6348 };
6349 struct rpc_message msg = {
6350 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6351 .rpc_argp = &args,
6352 .rpc_resp = &res,
6353 .rpc_cred = cred,
6354 };
6355 int status;
6356
6357 nfs_fattr_init(&locations->fattr);
6358 locations->server = server;
6359 locations->nlocations = 0;
6360
6361 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6362 nfs4_set_sequence_privileged(&args.seq_args);
6363 status = nfs4_call_sync_sequence(clnt, server, &msg,
6364 &args.seq_args, &res.seq_res);
6365 if (status == NFS4_OK &&
6366 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6367 status = -NFS4ERR_LEASE_MOVED;
6368 return status;
6369 }
6370
6371 #endif /* CONFIG_NFS_V4_1 */
6372
6373 /**
6374 * nfs4_proc_get_locations - discover locations for a migrated FSID
6375 * @inode: inode on FSID that is migrating
6376 * @locations: result of query
6377 * @page: buffer
6378 * @cred: credential to use for this operation
6379 *
6380 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6381 * operation failed, or a negative errno if a local error occurred.
6382 *
6383 * On success, "locations" is filled in, but if the server has
6384 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6385 * asserted.
6386 *
6387 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6388 * from this client that require migration recovery.
6389 */
6390 int nfs4_proc_get_locations(struct inode *inode,
6391 struct nfs4_fs_locations *locations,
6392 struct page *page, struct rpc_cred *cred)
6393 {
6394 struct nfs_server *server = NFS_SERVER(inode);
6395 struct nfs_client *clp = server->nfs_client;
6396 const struct nfs4_mig_recovery_ops *ops =
6397 clp->cl_mvops->mig_recovery_ops;
6398 struct nfs4_exception exception = { };
6399 int status;
6400
6401 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6402 (unsigned long long)server->fsid.major,
6403 (unsigned long long)server->fsid.minor,
6404 clp->cl_hostname);
6405 nfs_display_fhandle(NFS_FH(inode), __func__);
6406
6407 do {
6408 status = ops->get_locations(inode, locations, page, cred);
6409 if (status != -NFS4ERR_DELAY)
6410 break;
6411 nfs4_handle_exception(server, status, &exception);
6412 } while (exception.retry);
6413 return status;
6414 }
6415
6416 /*
6417 * This operation also signals the server that this client is
6418 * performing "lease moved" recovery. The server can stop
6419 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6420 * is appended to this compound to identify the client ID which is
6421 * performing recovery.
6422 */
6423 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6424 {
6425 struct nfs_server *server = NFS_SERVER(inode);
6426 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6427 struct rpc_clnt *clnt = server->client;
6428 struct nfs4_fsid_present_arg args = {
6429 .fh = NFS_FH(inode),
6430 .clientid = clp->cl_clientid,
6431 .renew = 1, /* append RENEW */
6432 };
6433 struct nfs4_fsid_present_res res = {
6434 .renew = 1,
6435 };
6436 struct rpc_message msg = {
6437 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6438 .rpc_argp = &args,
6439 .rpc_resp = &res,
6440 .rpc_cred = cred,
6441 };
6442 unsigned long now = jiffies;
6443 int status;
6444
6445 res.fh = nfs_alloc_fhandle();
6446 if (res.fh == NULL)
6447 return -ENOMEM;
6448
6449 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6450 nfs4_set_sequence_privileged(&args.seq_args);
6451 status = nfs4_call_sync_sequence(clnt, server, &msg,
6452 &args.seq_args, &res.seq_res);
6453 nfs_free_fhandle(res.fh);
6454 if (status)
6455 return status;
6456
6457 do_renew_lease(clp, now);
6458 return 0;
6459 }
6460
6461 #ifdef CONFIG_NFS_V4_1
6462
6463 /*
6464 * This operation also signals the server that this client is
6465 * performing "lease moved" recovery. The server can stop asserting
6466 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6467 * this operation is identified in the SEQUENCE operation in this
6468 * compound.
6469 */
6470 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6471 {
6472 struct nfs_server *server = NFS_SERVER(inode);
6473 struct rpc_clnt *clnt = server->client;
6474 struct nfs4_fsid_present_arg args = {
6475 .fh = NFS_FH(inode),
6476 };
6477 struct nfs4_fsid_present_res res = {
6478 };
6479 struct rpc_message msg = {
6480 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6481 .rpc_argp = &args,
6482 .rpc_resp = &res,
6483 .rpc_cred = cred,
6484 };
6485 int status;
6486
6487 res.fh = nfs_alloc_fhandle();
6488 if (res.fh == NULL)
6489 return -ENOMEM;
6490
6491 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6492 nfs4_set_sequence_privileged(&args.seq_args);
6493 status = nfs4_call_sync_sequence(clnt, server, &msg,
6494 &args.seq_args, &res.seq_res);
6495 nfs_free_fhandle(res.fh);
6496 if (status == NFS4_OK &&
6497 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6498 status = -NFS4ERR_LEASE_MOVED;
6499 return status;
6500 }
6501
6502 #endif /* CONFIG_NFS_V4_1 */
6503
6504 /**
6505 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6506 * @inode: inode on FSID to check
6507 * @cred: credential to use for this operation
6508 *
6509 * Server indicates whether the FSID is present, moved, or not
6510 * recognized. This operation is necessary to clear a LEASE_MOVED
6511 * condition for this client ID.
6512 *
6513 * Returns NFS4_OK if the FSID is present on this server,
6514 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6515 * NFS4ERR code if some error occurred on the server, or a
6516 * negative errno if a local failure occurred.
6517 */
6518 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6519 {
6520 struct nfs_server *server = NFS_SERVER(inode);
6521 struct nfs_client *clp = server->nfs_client;
6522 const struct nfs4_mig_recovery_ops *ops =
6523 clp->cl_mvops->mig_recovery_ops;
6524 struct nfs4_exception exception = { };
6525 int status;
6526
6527 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6528 (unsigned long long)server->fsid.major,
6529 (unsigned long long)server->fsid.minor,
6530 clp->cl_hostname);
6531 nfs_display_fhandle(NFS_FH(inode), __func__);
6532
6533 do {
6534 status = ops->fsid_present(inode, cred);
6535 if (status != -NFS4ERR_DELAY)
6536 break;
6537 nfs4_handle_exception(server, status, &exception);
6538 } while (exception.retry);
6539 return status;
6540 }
6541
6542 /**
6543 * If 'use_integrity' is true and the state management nfs_client
6544 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6545 * and the machine credential as per RFC3530bis and RFC5661 Security
6546 * Considerations sections. Otherwise, just use the user cred with the
6547 * filesystem's rpc_client.
6548 */
6549 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6550 {
6551 int status;
6552 struct nfs4_secinfo_arg args = {
6553 .dir_fh = NFS_FH(dir),
6554 .name = name,
6555 };
6556 struct nfs4_secinfo_res res = {
6557 .flavors = flavors,
6558 };
6559 struct rpc_message msg = {
6560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6561 .rpc_argp = &args,
6562 .rpc_resp = &res,
6563 };
6564 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6565 struct rpc_cred *cred = NULL;
6566
6567 if (use_integrity) {
6568 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6569 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6570 msg.rpc_cred = cred;
6571 }
6572
6573 dprintk("NFS call secinfo %s\n", name->name);
6574
6575 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6576 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6577
6578 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6579 &res.seq_res, 0);
6580 dprintk("NFS reply secinfo: %d\n", status);
6581
6582 if (cred)
6583 put_rpccred(cred);
6584
6585 return status;
6586 }
6587
6588 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6589 struct nfs4_secinfo_flavors *flavors)
6590 {
6591 struct nfs4_exception exception = { };
6592 int err;
6593 do {
6594 err = -NFS4ERR_WRONGSEC;
6595
6596 /* try to use integrity protection with machine cred */
6597 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6598 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6599
6600 /*
6601 * if unable to use integrity protection, or SECINFO with
6602 * integrity protection returns NFS4ERR_WRONGSEC (which is
6603 * disallowed by spec, but exists in deployed servers) use
6604 * the current filesystem's rpc_client and the user cred.
6605 */
6606 if (err == -NFS4ERR_WRONGSEC)
6607 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6608
6609 trace_nfs4_secinfo(dir, name, err);
6610 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6611 &exception);
6612 } while (exception.retry);
6613 return err;
6614 }
6615
6616 #ifdef CONFIG_NFS_V4_1
6617 /*
6618 * Check the exchange flags returned by the server for invalid flags: reject
6619 * unknown flags, having both the PNFS and NON_PNFS flags set, and having none
6620 * of the NON_PNFS, PNFS, or DS flags set.
6621 */
6622 static int nfs4_check_cl_exchange_flags(u32 flags)
6623 {
6624 if (flags & ~EXCHGID4_FLAG_MASK_R)
6625 goto out_inval;
6626 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6627 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6628 goto out_inval;
6629 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6630 goto out_inval;
6631 return NFS_OK;
6632 out_inval:
6633 return -NFS4ERR_INVAL;
6634 }
6635
6636 static bool
6637 nfs41_same_server_scope(struct nfs41_server_scope *a,
6638 struct nfs41_server_scope *b)
6639 {
6640 if (a->server_scope_sz == b->server_scope_sz &&
6641 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6642 return true;
6643
6644 return false;
6645 }
6646
6647 /*
6648 * nfs4_proc_bind_conn_to_session()
6649 *
6650 * The 4.1 client currently uses the same TCP connection for the
6651 * fore and backchannel.
6652 */
6653 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6654 {
6655 int status;
6656 struct nfs41_bind_conn_to_session_args args = {
6657 .client = clp,
6658 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6659 };
6660 struct nfs41_bind_conn_to_session_res res;
6661 struct rpc_message msg = {
6662 .rpc_proc =
6663 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6664 .rpc_argp = &args,
6665 .rpc_resp = &res,
6666 .rpc_cred = cred,
6667 };
6668
6669 dprintk("--> %s\n", __func__);
6670
6671 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6672 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6673 args.dir = NFS4_CDFC4_FORE;
6674
6675 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6676 trace_nfs4_bind_conn_to_session(clp, status);
6677 if (status == 0) {
6678 if (memcmp(res.sessionid.data,
6679 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6680 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6681 status = -EIO;
6682 goto out;
6683 }
6684 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6685 dprintk("NFS: %s: Unexpected direction from server\n",
6686 __func__);
6687 status = -EIO;
6688 goto out;
6689 }
6690 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6691 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6692 __func__);
6693 status = -EIO;
6694 goto out;
6695 }
6696 }
6697 out:
6698 dprintk("<-- %s status= %d\n", __func__, status);
6699 return status;
6700 }
6701
6702 /*
6703 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
6704 * plus the operations we would like to see in the allow map to enable certain features
6705 */
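/*
 * The enforce/allow op maps are bitmaps over the NFSv4.1 operation numbers:
 * words[0] covers ops 0-31 and words[1] ops 32-63.  For example, RFC 5661
 * assigns EXCHANGE_ID operation number 42, so it appears below as bit
 * (OP_EXCHANGE_ID - 32) == 10 of words[1].
 */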
6706 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6707 .how = SP4_MACH_CRED,
6708 .enforce.u.words = {
6709 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6710 1 << (OP_EXCHANGE_ID - 32) |
6711 1 << (OP_CREATE_SESSION - 32) |
6712 1 << (OP_DESTROY_SESSION - 32) |
6713 1 << (OP_DESTROY_CLIENTID - 32)
6714 },
6715 .allow.u.words = {
6716 [0] = 1 << (OP_CLOSE) |
6717 1 << (OP_LOCKU) |
6718 1 << (OP_COMMIT),
6719 [1] = 1 << (OP_SECINFO - 32) |
6720 1 << (OP_SECINFO_NO_NAME - 32) |
6721 1 << (OP_TEST_STATEID - 32) |
6722 1 << (OP_FREE_STATEID - 32) |
6723 1 << (OP_WRITE - 32)
6724 }
6725 };
6726
6727 /*
6728 * Select the state protection mode for client `clp' given the server results
6729 * from exchange_id in `sp'.
6730 *
6731 * Returns 0 on success, negative errno otherwise.
6732 */
6733 static int nfs4_sp4_select_mode(struct nfs_client *clp,
6734 struct nfs41_state_protection *sp)
6735 {
6736 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6737 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6738 1 << (OP_EXCHANGE_ID - 32) |
6739 1 << (OP_CREATE_SESSION - 32) |
6740 1 << (OP_DESTROY_SESSION - 32) |
6741 1 << (OP_DESTROY_CLIENTID - 32)
6742 };
6743 unsigned int i;
6744
6745 if (sp->how == SP4_MACH_CRED) {
6746 /* Print state protect result */
6747 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6748 for (i = 0; i <= LAST_NFS4_OP; i++) {
6749 if (test_bit(i, sp->enforce.u.longs))
6750 dfprintk(MOUNT, " enforce op %d\n", i);
6751 if (test_bit(i, sp->allow.u.longs))
6752 dfprintk(MOUNT, " allow op %d\n", i);
6753 }
6754
6755 /* make sure nothing is on enforce list that isn't supported */
6756 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6757 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6758 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6759 return -EINVAL;
6760 }
6761 }
6762
6763 /*
6764 * Minimal mode - state operations are allowed to use machine
6765 * credential. Note this already happens by default, so the
6766 * client doesn't have to do anything more than the negotiation.
6767 *
6768 * NOTE: we don't care if EXCHANGE_ID is in the list -
6769 * we're already using the machine cred for exchange_id
6770 * and will never use a different cred.
6771 */
6772 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6773 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6774 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6775 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6776 dfprintk(MOUNT, "sp4_mach_cred:\n");
6777 dfprintk(MOUNT, " minimal mode enabled\n");
6778 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6779 } else {
6780 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6781 return -EINVAL;
6782 }
6783
6784 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6785 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6786 dfprintk(MOUNT, " cleanup mode enabled\n");
6787 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6788 }
6789
6790 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6791 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6792 dfprintk(MOUNT, " secinfo mode enabled\n");
6793 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6794 }
6795
6796 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6797 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6798 dfprintk(MOUNT, " stateid mode enabled\n");
6799 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6800 }
6801
6802 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6803 dfprintk(MOUNT, " write mode enabled\n");
6804 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6805 }
6806
6807 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6808 dfprintk(MOUNT, " commit mode enabled\n");
6809 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6810 }
6811 }
6812
6813 return 0;
6814 }
6815
6816 /*
6817 * _nfs4_proc_exchange_id()
6818 *
6819 * Wrapper for EXCHANGE_ID operation.
6820 */
6821 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6822 u32 sp4_how)
6823 {
6824 nfs4_verifier verifier;
6825 struct nfs41_exchange_id_args args = {
6826 .verifier = &verifier,
6827 .client = clp,
6828 #ifdef CONFIG_NFS_V4_1_MIGRATION
6829 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6830 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6831 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6832 #else
6833 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6834 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6835 #endif
6836 };
6837 struct nfs41_exchange_id_res res = {
6838 0
6839 };
6840 int status;
6841 struct rpc_message msg = {
6842 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6843 .rpc_argp = &args,
6844 .rpc_resp = &res,
6845 .rpc_cred = cred,
6846 };
6847
6848 nfs4_init_boot_verifier(clp, &verifier);
6849 args.id_len = nfs4_init_uniform_client_string(clp, args.id,
6850 sizeof(args.id));
6851 dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
6852 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6853 args.id_len, args.id);
6854
6855 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
6856 GFP_NOFS);
6857 if (unlikely(res.server_owner == NULL)) {
6858 status = -ENOMEM;
6859 goto out;
6860 }
6861
6862 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
6863 GFP_NOFS);
6864 if (unlikely(res.server_scope == NULL)) {
6865 status = -ENOMEM;
6866 goto out_server_owner;
6867 }
6868
6869 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
6870 if (unlikely(res.impl_id == NULL)) {
6871 status = -ENOMEM;
6872 goto out_server_owner;
6873 }
6874
6875 switch (sp4_how) {
6876 case SP4_NONE:
6877 args.state_protect.how = SP4_NONE;
6878 break;
6879
6880 case SP4_MACH_CRED:
6881 args.state_protect = nfs4_sp4_mach_cred_request;
6882 break;
6883
6884 default:
6885 /* unsupported! */
6886 WARN_ON_ONCE(1);
6887 status = -EINVAL;
6888 kfree(res.impl_id);
goto out_server_owner;
6889 }
6890
6891 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6892 trace_nfs4_exchange_id(clp, status);
6893 if (status == 0)
6894 status = nfs4_check_cl_exchange_flags(res.flags);
6895
6896 if (status == 0)
6897 status = nfs4_sp4_select_mode(clp, &res.state_protect);
6898
6899 if (status == 0) {
6900 clp->cl_clientid = res.clientid;
6901 clp->cl_exchange_flags = res.flags;
6902 /* Client ID is not confirmed */
6903 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
6904 clear_bit(NFS4_SESSION_ESTABLISHED,
6905 &clp->cl_session->session_state);
6906 clp->cl_seqid = res.seqid;
6907 }
6908
6909 kfree(clp->cl_serverowner);
6910 clp->cl_serverowner = res.server_owner;
6911 res.server_owner = NULL;
6912
6913 /* use the most recent implementation id */
6914 kfree(clp->cl_implid);
6915 clp->cl_implid = res.impl_id;
6916
6917 if (clp->cl_serverscope != NULL &&
6918 !nfs41_same_server_scope(clp->cl_serverscope,
6919 res.server_scope)) {
6920 dprintk("%s: server_scope mismatch detected\n",
6921 __func__);
6922 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
6923 kfree(clp->cl_serverscope);
6924 clp->cl_serverscope = NULL;
6925 }
6926
6927 if (clp->cl_serverscope == NULL) {
6928 clp->cl_serverscope = res.server_scope;
6929 goto out;
6930 }
6931 } else
6932 kfree(res.impl_id);
6933
6934 out_server_owner:
6935 kfree(res.server_owner);
6936 out_server_scope:
6937 kfree(res.server_scope);
6938 out:
6939 if (clp->cl_implid != NULL)
6940 dprintk("NFS reply exchange_id: Server Implementation ID: "
6941 "domain: %s, name: %s, date: %llu,%u\n",
6942 clp->cl_implid->domain, clp->cl_implid->name,
6943 clp->cl_implid->date.seconds,
6944 clp->cl_implid->date.nseconds);
6945 dprintk("NFS reply exchange_id: %d\n", status);
6946 return status;
6947 }
6948
6949 /*
6950 * nfs4_proc_exchange_id()
6951 *
6952 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6953 *
6954 * Since the clientid has expired, all compounds using sessions
6955 * associated with the stale clientid will be returning
6956 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
6957 * be in some phase of session reset.
6958 *
6959 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
6960 */
6961 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
6962 {
6963 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
6964 int status;
6965
6966 /* try SP4_MACH_CRED if krb5i/p */
6967 if (authflavor == RPC_AUTH_GSS_KRB5I ||
6968 authflavor == RPC_AUTH_GSS_KRB5P) {
6969 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
6970 if (!status)
6971 return 0;
6972 }
6973
6974 /* try SP4_NONE */
6975 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
6976 }
6977
6978 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
6979 struct rpc_cred *cred)
6980 {
6981 struct rpc_message msg = {
6982 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
6983 .rpc_argp = clp,
6984 .rpc_cred = cred,
6985 };
6986 int status;
6987
6988 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6989 trace_nfs4_destroy_clientid(clp, status);
6990 if (status)
6991 dprintk("NFS: Got error %d from the server %s on "
6992 "DESTROY_CLIENTID.", status, clp->cl_hostname);
6993 return status;
6994 }
6995
6996 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
6997 struct rpc_cred *cred)
6998 {
6999 unsigned int loop;
7000 int ret;
7001
7002 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7003 ret = _nfs4_proc_destroy_clientid(clp, cred);
7004 switch (ret) {
7005 case -NFS4ERR_DELAY:
7006 case -NFS4ERR_CLIENTID_BUSY:
7007 ssleep(1);
7008 break;
7009 default:
7010 return ret;
7011 }
7012 }
7013 return 0;
7014 }
7015
7016 int nfs4_destroy_clientid(struct nfs_client *clp)
7017 {
7018 struct rpc_cred *cred;
7019 int ret = 0;
7020
7021 if (clp->cl_mvops->minor_version < 1)
7022 goto out;
7023 if (clp->cl_exchange_flags == 0)
7024 goto out;
7025 if (clp->cl_preserve_clid)
7026 goto out;
7027 cred = nfs4_get_clid_cred(clp);
7028 ret = nfs4_proc_destroy_clientid(clp, cred);
7029 if (cred)
7030 put_rpccred(cred);
7031 switch (ret) {
7032 case 0:
7033 case -NFS4ERR_STALE_CLIENTID:
7034 clp->cl_exchange_flags = 0;
7035 }
7036 out:
7037 return ret;
7038 }
7039
7040 struct nfs4_get_lease_time_data {
7041 struct nfs4_get_lease_time_args *args;
7042 struct nfs4_get_lease_time_res *res;
7043 struct nfs_client *clp;
7044 };
7045
7046 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7047 void *calldata)
7048 {
7049 struct nfs4_get_lease_time_data *data =
7050 (struct nfs4_get_lease_time_data *)calldata;
7051
7052 dprintk("--> %s\n", __func__);
7053 /* just set up the sequence; do not trigger session recovery
7054 since we're invoked within one */
7055 nfs41_setup_sequence(data->clp->cl_session,
7056 &data->args->la_seq_args,
7057 &data->res->lr_seq_res,
7058 task);
7059 dprintk("<-- %s\n", __func__);
7060 }
7061
7062 /*
7063 * Called from nfs4_state_manager thread for session setup, so don't recover
7064 * from sequence operation or clientid errors.
7065 */
7066 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7067 {
7068 struct nfs4_get_lease_time_data *data =
7069 (struct nfs4_get_lease_time_data *)calldata;
7070
7071 dprintk("--> %s\n", __func__);
7072 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7073 return;
7074 switch (task->tk_status) {
7075 case -NFS4ERR_DELAY:
7076 case -NFS4ERR_GRACE:
7077 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7078 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7079 task->tk_status = 0;
7080 /* fall through */
7081 case -NFS4ERR_RETRY_UNCACHED_REP:
7082 rpc_restart_call_prepare(task);
7083 return;
7084 }
7085 dprintk("<-- %s\n", __func__);
7086 }
7087
7088 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7089 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7090 .rpc_call_done = nfs4_get_lease_time_done,
7091 };
7092
7093 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7094 {
7095 struct rpc_task *task;
7096 struct nfs4_get_lease_time_args args;
7097 struct nfs4_get_lease_time_res res = {
7098 .lr_fsinfo = fsinfo,
7099 };
7100 struct nfs4_get_lease_time_data data = {
7101 .args = &args,
7102 .res = &res,
7103 .clp = clp,
7104 };
7105 struct rpc_message msg = {
7106 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7107 .rpc_argp = &args,
7108 .rpc_resp = &res,
7109 };
7110 struct rpc_task_setup task_setup = {
7111 .rpc_client = clp->cl_rpcclient,
7112 .rpc_message = &msg,
7113 .callback_ops = &nfs4_get_lease_time_ops,
7114 .callback_data = &data,
7115 .flags = RPC_TASK_TIMEOUT,
7116 };
7117 int status;
7118
7119 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7120 nfs4_set_sequence_privileged(&args.la_seq_args);
7121 dprintk("--> %s\n", __func__);
7122 task = rpc_run_task(&task_setup);
7123
7124 if (IS_ERR(task))
7125 status = PTR_ERR(task);
7126 else {
7127 status = task->tk_status;
7128 rpc_put_task(task);
7129 }
7130 dprintk("<-- %s return %d\n", __func__, status);
7131
7132 return status;
7133 }
7134
7135 /*
7136 * Initialize the values to be used by the client in CREATE_SESSION.
7137 * If nfs4_init_session set the fore channel request and response sizes,
7138 * use them.
7139 *
7140 * Set the back channel max_resp_sz_cached to zero to force the client to
7141 * always set csa_cachethis to FALSE because the current implementation
7142 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7143 */
7144 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7145 {
7146 unsigned int max_rqst_sz, max_resp_sz;
7147
7148 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7149 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7150
7151 /* Fore channel attributes */
7152 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7153 args->fc_attrs.max_resp_sz = max_resp_sz;
7154 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7155 args->fc_attrs.max_reqs = max_session_slots;
7156
7157 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7158 "max_ops=%u max_reqs=%u\n",
7159 __func__,
7160 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7161 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7162
7163 /* Back channel attributes */
7164 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7165 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7166 args->bc_attrs.max_resp_sz_cached = 0;
7167 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7168 args->bc_attrs.max_reqs = 1;
7169
7170 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7171 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7172 __func__,
7173 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7174 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7175 args->bc_attrs.max_reqs);
7176 }
7177
7178 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7179 struct nfs41_create_session_res *res)
7180 {
7181 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7182 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7183
7184 if (rcvd->max_resp_sz > sent->max_resp_sz)
7185 return -EINVAL;
7186 /*
7187 * Our requested max_ops is the minimum we need; we're not
7188 * prepared to break up compounds into smaller pieces than that.
7189 * So, no point even trying to continue if the server won't
7190 * cooperate:
7191 */
7192 if (rcvd->max_ops < sent->max_ops)
7193 return -EINVAL;
7194 if (rcvd->max_reqs == 0)
7195 return -EINVAL;
7196 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7197 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7198 return 0;
7199 }
7200
7201 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7202 struct nfs41_create_session_res *res)
7203 {
7204 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7205 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7206
7207 if (!(res->flags & SESSION4_BACK_CHAN))
7208 goto out;
7209 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7210 return -EINVAL;
7211 if (rcvd->max_resp_sz < sent->max_resp_sz)
7212 return -EINVAL;
7213 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7214 return -EINVAL;
7215 /* These would render the backchannel useless: */
7216 if (rcvd->max_ops != sent->max_ops)
7217 return -EINVAL;
7218 if (rcvd->max_reqs != sent->max_reqs)
7219 return -EINVAL;
7220 out:
7221 return 0;
7222 }
7223
7224 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7225 struct nfs41_create_session_res *res)
7226 {
7227 int ret;
7228
7229 ret = nfs4_verify_fore_channel_attrs(args, res);
7230 if (ret)
7231 return ret;
7232 return nfs4_verify_back_channel_attrs(args, res);
7233 }
7234
7235 static void nfs4_update_session(struct nfs4_session *session,
7236 struct nfs41_create_session_res *res)
7237 {
7238 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7239 /* Mark client id and session as being confirmed */
7240 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7241 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7242 session->flags = res->flags;
7243 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7244 if (res->flags & SESSION4_BACK_CHAN)
7245 memcpy(&session->bc_attrs, &res->bc_attrs,
7246 sizeof(session->bc_attrs));
7247 }
7248
7249 static int _nfs4_proc_create_session(struct nfs_client *clp,
7250 struct rpc_cred *cred)
7251 {
7252 struct nfs4_session *session = clp->cl_session;
7253 struct nfs41_create_session_args args = {
7254 .client = clp,
7255 .clientid = clp->cl_clientid,
7256 .seqid = clp->cl_seqid,
7257 .cb_program = NFS4_CALLBACK,
7258 };
7259 struct nfs41_create_session_res res;
7260
7261 struct rpc_message msg = {
7262 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7263 .rpc_argp = &args,
7264 .rpc_resp = &res,
7265 .rpc_cred = cred,
7266 };
7267 int status;
7268
7269 nfs4_init_channel_attrs(&args);
7270 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7271
7272 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7273 trace_nfs4_create_session(clp, status);
7274
7275 if (!status) {
7276 /* Verify the session's negotiated channel_attrs values */
7277 status = nfs4_verify_channel_attrs(&args, &res);
7278 /* Increment the clientid slot sequence id */
7279 if (clp->cl_seqid == res.seqid)
7280 clp->cl_seqid++;
7281 if (status)
7282 goto out;
7283 nfs4_update_session(session, &res);
7284 }
7285 out:
7286 return status;
7287 }
7288
7289 /*
7290 * Issues a CREATE_SESSION operation to the server.
7291 * It is the responsibility of the caller to verify the session is
7292 * expired before calling this routine.
7293 */
7294 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7295 {
7296 int status;
7297 unsigned *ptr;
7298 struct nfs4_session *session = clp->cl_session;
7299
7300 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7301
7302 status = _nfs4_proc_create_session(clp, cred);
7303 if (status)
7304 goto out;
7305
7306 /* Init or reset the session slot tables */
7307 status = nfs4_setup_session_slot_tables(session);
7308 dprintk("slot table setup returned %d\n", status);
7309 if (status)
7310 goto out;
7311
7312 ptr = (unsigned *)&session->sess_id.data[0];
7313 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
7314 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7315 out:
7316 dprintk("<-- %s\n", __func__);
7317 return status;
7318 }
7319
7320 /*
7321 * Issue the over-the-wire RPC DESTROY_SESSION.
7322 * The caller must serialize access to this routine.
7323 */
7324 int nfs4_proc_destroy_session(struct nfs4_session *session,
7325 struct rpc_cred *cred)
7326 {
7327 struct rpc_message msg = {
7328 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7329 .rpc_argp = session,
7330 .rpc_cred = cred,
7331 };
7332 int status = 0;
7333
7334 dprintk("--> nfs4_proc_destroy_session\n");
7335
7336 /* session is still being setup */
7337 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7338 return 0;
7339
7340 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7341 trace_nfs4_destroy_session(session->clp, status);
7342
7343 if (status)
7344 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7345 "Session has been destroyed regardless...\n", status);
7346
7347 dprintk("<-- nfs4_proc_destroy_session\n");
7348 return status;
7349 }
7350
7351 /*
7352 * Renew the cl_session lease.
7353 */
7354 struct nfs4_sequence_data {
7355 struct nfs_client *clp;
7356 struct nfs4_sequence_args args;
7357 struct nfs4_sequence_res res;
7358 };
7359
7360 static void nfs41_sequence_release(void *data)
7361 {
7362 struct nfs4_sequence_data *calldata = data;
7363 struct nfs_client *clp = calldata->clp;
7364
7365 if (atomic_read(&clp->cl_count) > 1)
7366 nfs4_schedule_state_renewal(clp);
7367 nfs_put_client(clp);
7368 kfree(calldata);
7369 }
7370
7371 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7372 {
7373 switch (task->tk_status) {
7374 case -NFS4ERR_DELAY:
7375 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7376 return -EAGAIN;
7377 default:
7378 nfs4_schedule_lease_recovery(clp);
7379 }
7380 return 0;
7381 }
7382
7383 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7384 {
7385 struct nfs4_sequence_data *calldata = data;
7386 struct nfs_client *clp = calldata->clp;
7387
7388 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7389 return;
7390
7391 trace_nfs4_sequence(clp, task->tk_status);
7392 if (task->tk_status < 0) {
7393 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7394 if (atomic_read(&clp->cl_count) == 1)
7395 goto out;
7396
7397 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7398 rpc_restart_call_prepare(task);
7399 return;
7400 }
7401 }
7402 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7403 out:
7404 dprintk("<-- %s\n", __func__);
7405 }
7406
7407 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7408 {
7409 struct nfs4_sequence_data *calldata = data;
7410 struct nfs_client *clp = calldata->clp;
7411 struct nfs4_sequence_args *args;
7412 struct nfs4_sequence_res *res;
7413
7414 args = task->tk_msg.rpc_argp;
7415 res = task->tk_msg.rpc_resp;
7416
7417 nfs41_setup_sequence(clp->cl_session, args, res, task);
7418 }
7419
7420 static const struct rpc_call_ops nfs41_sequence_ops = {
7421 .rpc_call_done = nfs41_sequence_call_done,
7422 .rpc_call_prepare = nfs41_sequence_prepare,
7423 .rpc_release = nfs41_sequence_release,
7424 };
7425
7426 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7427 struct rpc_cred *cred,
7428 bool is_privileged)
7429 {
7430 struct nfs4_sequence_data *calldata;
7431 struct rpc_message msg = {
7432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7433 .rpc_cred = cred,
7434 };
7435 struct rpc_task_setup task_setup_data = {
7436 .rpc_client = clp->cl_rpcclient,
7437 .rpc_message = &msg,
7438 .callback_ops = &nfs41_sequence_ops,
7439 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7440 };
7441
7442 if (!atomic_inc_not_zero(&clp->cl_count))
7443 return ERR_PTR(-EIO);
7444 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7445 if (calldata == NULL) {
7446 nfs_put_client(clp);
7447 return ERR_PTR(-ENOMEM);
7448 }
7449 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7450 if (is_privileged)
7451 nfs4_set_sequence_privileged(&calldata->args);
7452 msg.rpc_argp = &calldata->args;
7453 msg.rpc_resp = &calldata->res;
7454 calldata->clp = clp;
7455 task_setup_data.callback_data = calldata;
7456
7457 return rpc_run_task(&task_setup_data);
7458 }
7459
7460 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7461 {
7462 struct rpc_task *task;
7463 int ret = 0;
7464
7465 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7466 return -EAGAIN;
7467 task = _nfs41_proc_sequence(clp, cred, false);
7468 if (IS_ERR(task))
7469 ret = PTR_ERR(task);
7470 else
7471 rpc_put_task_async(task);
7472 dprintk("<-- %s status=%d\n", __func__, ret);
7473 return ret;
7474 }
7475
7476 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7477 {
7478 struct rpc_task *task;
7479 int ret;
7480
7481 task = _nfs41_proc_sequence(clp, cred, true);
7482 if (IS_ERR(task)) {
7483 ret = PTR_ERR(task);
7484 goto out;
7485 }
7486 ret = rpc_wait_for_completion_task(task);
7487 if (!ret) {
7488 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7489
7490 if (task->tk_status == 0)
7491 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7492 ret = task->tk_status;
7493 }
7494 rpc_put_task(task);
7495 out:
7496 dprintk("<-- %s status=%d\n", __func__, ret);
7497 return ret;
7498 }
7499
7500 struct nfs4_reclaim_complete_data {
7501 struct nfs_client *clp;
7502 struct nfs41_reclaim_complete_args arg;
7503 struct nfs41_reclaim_complete_res res;
7504 };
7505
7506 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7507 {
7508 struct nfs4_reclaim_complete_data *calldata = data;
7509
7510 nfs41_setup_sequence(calldata->clp->cl_session,
7511 &calldata->arg.seq_args,
7512 &calldata->res.seq_res,
7513 task);
7514 }
7515
7516 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7517 {
7518 switch (task->tk_status) {
7519 case 0:
7520 case -NFS4ERR_COMPLETE_ALREADY:
7521 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7522 break;
7523 case -NFS4ERR_DELAY:
7524 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7525 /* fall through */
7526 case -NFS4ERR_RETRY_UNCACHED_REP:
7527 return -EAGAIN;
7528 default:
7529 nfs4_schedule_lease_recovery(clp);
7530 }
7531 return 0;
7532 }
7533
7534 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7535 {
7536 struct nfs4_reclaim_complete_data *calldata = data;
7537 struct nfs_client *clp = calldata->clp;
7538 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7539
7540 dprintk("--> %s\n", __func__);
7541 if (!nfs41_sequence_done(task, res))
7542 return;
7543
7544 trace_nfs4_reclaim_complete(clp, task->tk_status);
7545 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7546 rpc_restart_call_prepare(task);
7547 return;
7548 }
7549 dprintk("<-- %s\n", __func__);
7550 }
7551
7552 static void nfs4_free_reclaim_complete_data(void *data)
7553 {
7554 struct nfs4_reclaim_complete_data *calldata = data;
7555
7556 kfree(calldata);
7557 }
7558
7559 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7560 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7561 .rpc_call_done = nfs4_reclaim_complete_done,
7562 .rpc_release = nfs4_free_reclaim_complete_data,
7563 };
7564
7565 /*
7566 * Issue a global reclaim complete.
7567 */
7568 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7569 struct rpc_cred *cred)
7570 {
7571 struct nfs4_reclaim_complete_data *calldata;
7572 struct rpc_task *task;
7573 struct rpc_message msg = {
7574 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7575 .rpc_cred = cred,
7576 };
7577 struct rpc_task_setup task_setup_data = {
7578 .rpc_client = clp->cl_rpcclient,
7579 .rpc_message = &msg,
7580 .callback_ops = &nfs4_reclaim_complete_call_ops,
7581 .flags = RPC_TASK_ASYNC,
7582 };
7583 int status = -ENOMEM;
7584
7585 dprintk("--> %s\n", __func__);
7586 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7587 if (calldata == NULL)
7588 goto out;
7589 calldata->clp = clp;
7590 calldata->arg.one_fs = 0;
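/* one_fs == 0 is sent as rca_one_fs = FALSE, i.e. a global RECLAIM_COMPLETE
 * covering all filesystems on the server rather than only the current one
 * (see the RECLAIM_COMPLETE definition in RFC 5661). */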
7591
7592 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7593 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7594 msg.rpc_argp = &calldata->arg;
7595 msg.rpc_resp = &calldata->res;
7596 task_setup_data.callback_data = calldata;
7597 task = rpc_run_task(&task_setup_data);
7598 if (IS_ERR(task)) {
7599 status = PTR_ERR(task);
7600 goto out;
7601 }
7602 status = nfs4_wait_for_completion_rpc_task(task);
7603 if (status == 0)
7604 status = task->tk_status;
7605 rpc_put_task(task);
7606 return 0;
7607 out:
7608 dprintk("<-- %s status=%d\n", __func__, status);
7609 return status;
7610 }
7611
7612 static void
7613 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7614 {
7615 struct nfs4_layoutget *lgp = calldata;
7616 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7617 struct nfs4_session *session = nfs4_get_session(server);
7618
7619 dprintk("--> %s\n", __func__);
7620 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
7621 * right now covering the LAYOUTGET we are about to send.
7622 * However, that is not so catastrophic, and there seems
7623 * to be no way to prevent it completely.
7624 */
7625 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7626 &lgp->res.seq_res, task))
7627 return;
7628 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7629 NFS_I(lgp->args.inode)->layout,
7630 &lgp->args.range,
7631 lgp->args.ctx->state)) {
7632 rpc_exit(task, NFS4_OK);
7633 }
7634 }
7635
7636 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7637 {
7638 struct nfs4_layoutget *lgp = calldata;
7639 struct inode *inode = lgp->args.inode;
7640 struct nfs_server *server = NFS_SERVER(inode);
7641 struct pnfs_layout_hdr *lo;
7642 struct nfs4_state *state = NULL;
7643 unsigned long timeo, now, giveup;
7644
7645 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7646
7647 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7648 goto out;
7649
7650 switch (task->tk_status) {
7651 case 0:
7652 goto out;
7653 /*
7654 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7655 * (or clients) writing to the same RAID stripe
7656 */
7657 case -NFS4ERR_LAYOUTTRYLATER:
7658 /*
7659 * NFS4ERR_RECALLCONFLICT means a conflict with ourselves (the existing
7660 * layout must be recalled before a new one can be granted).
7661 */
7662 case -NFS4ERR_RECALLCONFLICT:
7663 timeo = rpc_get_timeout(task->tk_client);
7664 giveup = lgp->args.timestamp + timeo;
7665 now = jiffies;
7666 if (time_after(giveup, now)) {
7667 unsigned long delay;
7668
7669 /* Delay for:
7670 * - not less than NFS4_POLL_RETRY_MIN,
7671 * - at most one jiffy short of the give-up time,
7672 * - with exponential backoff (time_now minus start_attempt).
7673 */
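/* Illustrative example (numbers are hypothetical): if 2*HZ have elapsed
 * since args.timestamp and 10*HZ remain before giveup, the delay below is
 * max(NFS4_POLL_RETRY_MIN, min(10*HZ - 1, 2*HZ)) == 2*HZ, so each retry
 * waits roughly as long as has already elapsed and the total elapsed time
 * approximately doubles per attempt. */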
7674 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7675 min((giveup - now - 1),
7676 now - lgp->args.timestamp));
7677
7678 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7679 __func__, delay);
7680 rpc_delay(task, delay);
7681 task->tk_status = 0;
7682 rpc_restart_call_prepare(task);
7683 goto out; /* Do not call nfs4_async_handle_error() */
7684 }
7685 break;
7686 case -NFS4ERR_EXPIRED:
7687 case -NFS4ERR_BAD_STATEID:
7688 spin_lock(&inode->i_lock);
7689 lo = NFS_I(inode)->layout;
7690 if (!lo || list_empty(&lo->plh_segs)) {
7691 spin_unlock(&inode->i_lock);
7692 /* If the open stateid was bad, then recover it. */
7693 state = lgp->args.ctx->state;
7694 } else {
7695 LIST_HEAD(head);
7696
7697 /*
7698 * Mark the bad layout state as invalid, then retry
7699 * with the current stateid.
7700 */
7701 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7702 spin_unlock(&inode->i_lock);
7703 pnfs_free_lseg_list(&head);
7704
7705 task->tk_status = 0;
7706 rpc_restart_call_prepare(task);
7707 }
7708 }
7709 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7710 rpc_restart_call_prepare(task);
7711 out:
7712 dprintk("<-- %s\n", __func__);
7713 }
7714
7715 static size_t max_response_pages(struct nfs_server *server)
7716 {
7717 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7718 return nfs_page_array_len(0, max_resp_sz);
7719 }
7720
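/* Free the LAYOUTGET response page array.  The array may be only partially
 * populated (nfs4_alloc_pages() fills it front to back), so stop at the
 * first NULL slot. */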
7721 static void nfs4_free_pages(struct page **pages, size_t size)
7722 {
7723 int i;
7724
7725 if (!pages)
7726 return;
7727
7728 for (i = 0; i < size; i++) {
7729 if (!pages[i])
7730 break;
7731 __free_page(pages[i]);
7732 }
7733 kfree(pages);
7734 }
7735
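/* Allocate @size pages to hold the LAYOUTGET response.  On partial failure
 * the pages allocated so far are released via nfs4_free_pages() and NULL
 * is returned. */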
7736 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7737 {
7738 struct page **pages;
7739 int i;
7740
7741 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7742 if (!pages) {
7743 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7744 return NULL;
7745 }
7746
7747 for (i = 0; i < size; i++) {
7748 pages[i] = alloc_page(gfp_flags);
7749 if (!pages[i]) {
7750 dprintk("%s: failed to allocate page\n", __func__);
7751 nfs4_free_pages(pages, size);
7752 return NULL;
7753 }
7754 }
7755
7756 return pages;
7757 }
7758
7759 static void nfs4_layoutget_release(void *calldata)
7760 {
7761 struct nfs4_layoutget *lgp = calldata;
7762 struct inode *inode = lgp->args.inode;
7763 struct nfs_server *server = NFS_SERVER(inode);
7764 size_t max_pages = max_response_pages(server);
7765
7766 dprintk("--> %s\n", __func__);
7767 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7768 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7769 put_nfs_open_context(lgp->args.ctx);
7770 kfree(calldata);
7771 dprintk("<-- %s\n", __func__);
7772 }
7773
7774 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7775 .rpc_call_prepare = nfs4_layoutget_prepare,
7776 .rpc_call_done = nfs4_layoutget_done,
7777 .rpc_release = nfs4_layoutget_release,
7778 };
7779
7780 struct pnfs_layout_segment *
7781 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7782 {
7783 struct inode *inode = lgp->args.inode;
7784 struct nfs_server *server = NFS_SERVER(inode);
7785 size_t max_pages = max_response_pages(server);
7786 struct rpc_task *task;
7787 struct rpc_message msg = {
7788 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7789 .rpc_argp = &lgp->args,
7790 .rpc_resp = &lgp->res,
7791 .rpc_cred = lgp->cred,
7792 };
7793 struct rpc_task_setup task_setup_data = {
7794 .rpc_client = server->client,
7795 .rpc_message = &msg,
7796 .callback_ops = &nfs4_layoutget_call_ops,
7797 .callback_data = lgp,
7798 .flags = RPC_TASK_ASYNC,
7799 };
7800 struct pnfs_layout_segment *lseg = NULL;
7801 int status = 0;
7802
7803 dprintk("--> %s\n", __func__);
7804
7805 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7806 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7807
7808 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7809 if (!lgp->args.layout.pages) {
7810 nfs4_layoutget_release(lgp);
7811 return ERR_PTR(-ENOMEM);
7812 }
7813 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7814 lgp->args.timestamp = jiffies;
7815
7816 lgp->res.layoutp = &lgp->args.layout;
7817 lgp->res.seq_res.sr_slot = NULL;
7818 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7819
7820 task = rpc_run_task(&task_setup_data);
7821 if (IS_ERR(task))
7822 return ERR_CAST(task);
7823 status = nfs4_wait_for_completion_rpc_task(task);
7824 if (status == 0)
7825 status = task->tk_status;
7826 trace_nfs4_layoutget(lgp->args.ctx,
7827 &lgp->args.range,
7828 &lgp->res.range,
7829 status);
7830 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7831 if (status == 0 && lgp->res.layoutp->len)
7832 lseg = pnfs_layout_process(lgp);
7833 rpc_put_task(task);
7834 dprintk("<-- %s status=%d\n", __func__, status);
7835 if (status)
7836 return ERR_PTR(status);
7837 return lseg;
7838 }
7839
7840 static void
7841 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
7842 {
7843 struct nfs4_layoutreturn *lrp = calldata;
7844
7845 dprintk("--> %s\n", __func__);
7846 nfs41_setup_sequence(lrp->clp->cl_session,
7847 &lrp->args.seq_args,
7848 &lrp->res.seq_res,
7849 task);
7850 }
7851
7852 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7853 {
7854 struct nfs4_layoutreturn *lrp = calldata;
7855 struct nfs_server *server;
7856
7857 dprintk("--> %s\n", __func__);
7858
7859 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
7860 return;
7861
7862 server = NFS_SERVER(lrp->args.inode);
7863 switch (task->tk_status) {
7864 default:
7865 task->tk_status = 0;
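/* Fall through: errors other than -NFS4ERR_DELAY are cleared and the
 * layoutreturn is treated as complete. */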
7866 case 0:
7867 break;
7868 case -NFS4ERR_DELAY:
7869 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
7870 break;
7871 rpc_restart_call_prepare(task);
7872 return;
7873 }
7874 dprintk("<-- %s\n", __func__);
7875 }
7876
7877 static void nfs4_layoutreturn_release(void *calldata)
7878 {
7879 struct nfs4_layoutreturn *lrp = calldata;
7880 struct pnfs_layout_hdr *lo = lrp->args.layout;
7881
7882 dprintk("--> %s\n", __func__);
7883 spin_lock(&lo->plh_inode->i_lock);
7884 if (lrp->res.lrs_present)
7885 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7886 pnfs_clear_layoutreturn_waitbit(lo);
7887 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7888 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7889 lo->plh_block_lgets--;
7890 spin_unlock(&lo->plh_inode->i_lock);
7891 pnfs_put_layout_hdr(lrp->args.layout);
7892 nfs_iput_and_deactive(lrp->inode);
7893 kfree(calldata);
7894 dprintk("<-- %s\n", __func__);
7895 }
7896
7897 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
7898 .rpc_call_prepare = nfs4_layoutreturn_prepare,
7899 .rpc_call_done = nfs4_layoutreturn_done,
7900 .rpc_release = nfs4_layoutreturn_release,
7901 };
7902
7903 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
7904 {
7905 struct rpc_task *task;
7906 struct rpc_message msg = {
7907 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
7908 .rpc_argp = &lrp->args,
7909 .rpc_resp = &lrp->res,
7910 .rpc_cred = lrp->cred,
7911 };
7912 struct rpc_task_setup task_setup_data = {
7913 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
7914 .rpc_message = &msg,
7915 .callback_ops = &nfs4_layoutreturn_call_ops,
7916 .callback_data = lrp,
7917 };
7918 int status = 0;
7919
7920 dprintk("--> %s\n", __func__);
7921 if (!sync) {
7922 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
7923 if (!lrp->inode) {
7924 nfs4_layoutreturn_release(lrp);
7925 return -EAGAIN;
7926 }
7927 task_setup_data.flags |= RPC_TASK_ASYNC;
7928 }
7929 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
7930 task = rpc_run_task(&task_setup_data);
7931 if (IS_ERR(task))
7932 return PTR_ERR(task);
7933 if (sync)
7934 status = task->tk_status;
7935 trace_nfs4_layoutreturn(lrp->args.inode, status);
7936 dprintk("<-- %s status=%d\n", __func__, status);
7937 rpc_put_task(task);
7938 return status;
7939 }
7940
7941 static int
7942 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
7943 struct pnfs_device *pdev,
7944 struct rpc_cred *cred)
7945 {
7946 struct nfs4_getdeviceinfo_args args = {
7947 .pdev = pdev,
7948 .notify_types = NOTIFY_DEVICEID4_CHANGE |
7949 NOTIFY_DEVICEID4_DELETE,
7950 };
7951 struct nfs4_getdeviceinfo_res res = {
7952 .pdev = pdev,
7953 };
7954 struct rpc_message msg = {
7955 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
7956 .rpc_argp = &args,
7957 .rpc_resp = &res,
7958 .rpc_cred = cred,
7959 };
7960 int status;
7961
7962 dprintk("--> %s\n", __func__);
7963 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
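/* The server may acknowledge a different set of notification types than
 * was requested: extra, unrequested bits are only logged, while any
 * mismatch at all means the device info cannot be cached, so mark it
 * nocache below. */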
7964 if (res.notification & ~args.notify_types)
7965 dprintk("%s: unsupported notification\n", __func__);
7966 if (res.notification != args.notify_types)
7967 pdev->nocache = 1;
7968
7969 dprintk("<-- %s status=%d\n", __func__, status);
7970
7971 return status;
7972 }
7973
7974 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
7975 struct pnfs_device *pdev,
7976 struct rpc_cred *cred)
7977 {
7978 struct nfs4_exception exception = { };
7979 int err;
7980
7981 do {
7982 err = nfs4_handle_exception(server,
7983 _nfs4_proc_getdeviceinfo(server, pdev, cred),
7984 &exception);
7985 } while (exception.retry);
7986 return err;
7987 }
7988 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
7989
7990 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
7991 {
7992 struct nfs4_layoutcommit_data *data = calldata;
7993 struct nfs_server *server = NFS_SERVER(data->args.inode);
7994 struct nfs4_session *session = nfs4_get_session(server);
7995
7996 nfs41_setup_sequence(session,
7997 &data->args.seq_args,
7998 &data->res.seq_res,
7999 task);
8000 }
8001
8002 static void
8003 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8004 {
8005 struct nfs4_layoutcommit_data *data = calldata;
8006 struct nfs_server *server = NFS_SERVER(data->args.inode);
8007
8008 if (!nfs41_sequence_done(task, &data->res.seq_res))
8009 return;
8010
8011 switch (task->tk_status) { /* Just ignore these failures */
8012 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8013 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8014 case -NFS4ERR_BADLAYOUT: /* no layout */
8015 case -NFS4ERR_GRACE: /* loca_reclaim always false */
8016 task->tk_status = 0;
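/* Fall through: the error has been cleared, treat as success */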
8017 case 0:
8018 break;
8019 default:
8020 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8021 rpc_restart_call_prepare(task);
8022 return;
8023 }
8024 }
8025 }
8026
8027 static void nfs4_layoutcommit_release(void *calldata)
8028 {
8029 struct nfs4_layoutcommit_data *data = calldata;
8030
8031 pnfs_cleanup_layoutcommit(data);
8032 nfs_post_op_update_inode_force_wcc(data->args.inode,
8033 data->res.fattr);
8034 put_rpccred(data->cred);
8035 nfs_iput_and_deactive(data->inode);
8036 kfree(data);
8037 }
8038
8039 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8040 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8041 .rpc_call_done = nfs4_layoutcommit_done,
8042 .rpc_release = nfs4_layoutcommit_release,
8043 };
8044
8045 int
8046 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8047 {
8048 struct rpc_message msg = {
8049 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8050 .rpc_argp = &data->args,
8051 .rpc_resp = &data->res,
8052 .rpc_cred = data->cred,
8053 };
8054 struct rpc_task_setup task_setup_data = {
8055 .task = &data->task,
8056 .rpc_client = NFS_CLIENT(data->args.inode),
8057 .rpc_message = &msg,
8058 .callback_ops = &nfs4_layoutcommit_ops,
8059 .callback_data = data,
8060 };
8061 struct rpc_task *task;
8062 int status = 0;
8063
8064 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
8065 "lbw: %llu inode %lu\n",
8066 data->task.tk_pid, sync,
8067 data->args.lastbytewritten,
8068 data->args.inode->i_ino);
8069
8070 if (!sync) {
8071 data->inode = nfs_igrab_and_active(data->args.inode);
8072 if (data->inode == NULL) {
8073 nfs4_layoutcommit_release(data);
8074 return -EAGAIN;
8075 }
8076 task_setup_data.flags = RPC_TASK_ASYNC;
8077 }
8078 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8079 task = rpc_run_task(&task_setup_data);
8080 if (IS_ERR(task))
8081 return PTR_ERR(task);
8082 if (sync)
8083 status = task->tk_status;
8084 trace_nfs4_layoutcommit(data->args.inode, status);
8085 dprintk("%s: status %d\n", __func__, status);
8086 rpc_put_task(task);
8087 return status;
8088 }
8089
8090 /**
8091 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8092 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8093 */
8094 static int
8095 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8096 struct nfs_fsinfo *info,
8097 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8098 {
8099 struct nfs41_secinfo_no_name_args args = {
8100 .style = SECINFO_STYLE_CURRENT_FH,
8101 };
8102 struct nfs4_secinfo_res res = {
8103 .flavors = flavors,
8104 };
8105 struct rpc_message msg = {
8106 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8107 .rpc_argp = &args,
8108 .rpc_resp = &res,
8109 };
8110 struct rpc_clnt *clnt = server->client;
8111 struct rpc_cred *cred = NULL;
8112 int status;
8113
8114 if (use_integrity) {
8115 clnt = server->nfs_client->cl_rpcclient;
8116 cred = nfs4_get_clid_cred(server->nfs_client);
8117 msg.rpc_cred = cred;
8118 }
8119
8120 dprintk("--> %s\n", __func__);
8121 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8122 &res.seq_res, 0);
8123 dprintk("<-- %s status=%d\n", __func__, status);
8124
8125 if (cred)
8126 put_rpccred(cred);
8127
8128 return status;
8129 }
8130
8131 static int
8132 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8133 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8134 {
8135 struct nfs4_exception exception = { };
8136 int err;
8137 do {
8138 /* first try using integrity protection */
8139 err = -NFS4ERR_WRONGSEC;
8140
8141 /* try to use integrity protection with machine cred */
8142 if (_nfs4_is_integrity_protected(server->nfs_client))
8143 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8144 flavors, true);
8145
8146 /*
8147 * If unable to use integrity protection, or if SECINFO with
8148 * integrity protection returns NFS4ERR_WRONGSEC (which is
8149 * disallowed by the spec but exists in deployed servers), use
8150 * the current filesystem's rpc_client and the user cred.
8151 */
8152 if (err == -NFS4ERR_WRONGSEC)
8153 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8154 flavors, false);
8155
8156 switch (err) {
8157 case 0:
8158 case -NFS4ERR_WRONGSEC:
8159 case -ENOTSUPP:
8160 goto out;
8161 default:
8162 err = nfs4_handle_exception(server, err, &exception);
8163 }
8164 } while (exception.retry);
8165 out:
8166 return err;
8167 }
8168
8169 static int
8170 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8171 struct nfs_fsinfo *info)
8172 {
8173 int err;
8174 struct page *page;
8175 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8176 struct nfs4_secinfo_flavors *flavors;
8177 struct nfs4_secinfo4 *secinfo;
8178 int i;
8179
8180 page = alloc_page(GFP_KERNEL);
8181 if (!page) {
8182 err = -ENOMEM;
8183 goto out;
8184 }
8185
8186 flavors = page_address(page);
8187 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8188
8189 /*
8190 * Fall back on "guess and check" method if
8191 * the server doesn't support SECINFO_NO_NAME
8192 */
8193 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8194 err = nfs4_find_root_sec(server, fhandle, info);
8195 goto out_freepage;
8196 }
8197 if (err)
8198 goto out_freepage;
8199
8200 for (i = 0; i < flavors->num_flavors; i++) {
8201 secinfo = &flavors->flavors[i];
8202
8203 switch (secinfo->flavor) {
8204 case RPC_AUTH_NULL:
8205 case RPC_AUTH_UNIX:
8206 case RPC_AUTH_GSS:
8207 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8208 &secinfo->flavor_info);
8209 break;
8210 default:
8211 flavor = RPC_AUTH_MAXFLAVOR;
8212 break;
8213 }
8214
8215 if (!nfs_auth_info_match(&server->auth_info, flavor))
8216 flavor = RPC_AUTH_MAXFLAVOR;
8217
8218 if (flavor != RPC_AUTH_MAXFLAVOR) {
8219 err = nfs4_lookup_root_sec(server, fhandle,
8220 info, flavor);
8221 if (!err)
8222 break;
8223 }
8224 }
8225
8226 if (flavor == RPC_AUTH_MAXFLAVOR)
8227 err = -EPERM;
8228
8229 out_freepage:
8230 put_page(page);
8231 if (err == -EACCES)
8232 return -EPERM;
8233 out:
8234 return err;
8235 }
8236
8237 static int _nfs41_test_stateid(struct nfs_server *server,
8238 nfs4_stateid *stateid,
8239 struct rpc_cred *cred)
8240 {
8241 int status;
8242 struct nfs41_test_stateid_args args = {
8243 .stateid = stateid,
8244 };
8245 struct nfs41_test_stateid_res res;
8246 struct rpc_message msg = {
8247 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8248 .rpc_argp = &args,
8249 .rpc_resp = &res,
8250 .rpc_cred = cred,
8251 };
8252 struct rpc_clnt *rpc_client = server->client;
8253
8254 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8255 &rpc_client, &msg);
8256
8257 dprintk("NFS call test_stateid %p\n", stateid);
8258 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8259 nfs4_set_sequence_privileged(&args.seq_args);
8260 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8261 &args.seq_args, &res.seq_res);
8262 if (status != NFS_OK) {
8263 dprintk("NFS reply test_stateid: failed, %d\n", status);
8264 return status;
8265 }
8266 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8267 return -res.status;
8268 }
8269
8270 /**
8271 * nfs41_test_stateid - perform a TEST_STATEID operation
8272 *
8273 * @server: server / transport on which to perform the operation
8274 * @stateid: state ID to test
8275 * @cred: credential
8276 *
8277 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8278 * Otherwise a negative NFS4ERR value is returned if the operation
8279 * failed or the state ID is not currently valid.
8280 */
8281 static int nfs41_test_stateid(struct nfs_server *server,
8282 nfs4_stateid *stateid,
8283 struct rpc_cred *cred)
8284 {
8285 struct nfs4_exception exception = { };
8286 int err;
8287 do {
8288 err = _nfs41_test_stateid(server, stateid, cred);
8289 if (err != -NFS4ERR_DELAY)
8290 break;
8291 nfs4_handle_exception(server, err, &exception);
8292 } while (exception.retry);
8293 return err;
8294 }
8295
8296 struct nfs_free_stateid_data {
8297 struct nfs_server *server;
8298 struct nfs41_free_stateid_args args;
8299 struct nfs41_free_stateid_res res;
8300 };
8301
8302 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8303 {
8304 struct nfs_free_stateid_data *data = calldata;
8305 nfs41_setup_sequence(nfs4_get_session(data->server),
8306 &data->args.seq_args,
8307 &data->res.seq_res,
8308 task);
8309 }
8310
8311 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8312 {
8313 struct nfs_free_stateid_data *data = calldata;
8314
8315 nfs41_sequence_done(task, &data->res.seq_res);
8316
8317 switch (task->tk_status) {
8318 case -NFS4ERR_DELAY:
8319 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8320 rpc_restart_call_prepare(task);
8321 }
8322 }
8323
8324 static void nfs41_free_stateid_release(void *calldata)
8325 {
8326 kfree(calldata);
8327 }
8328
8329 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8330 .rpc_call_prepare = nfs41_free_stateid_prepare,
8331 .rpc_call_done = nfs41_free_stateid_done,
8332 .rpc_release = nfs41_free_stateid_release,
8333 };
8334
8335 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8336 nfs4_stateid *stateid,
8337 struct rpc_cred *cred,
8338 bool privileged)
8339 {
8340 struct rpc_message msg = {
8341 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8342 .rpc_cred = cred,
8343 };
8344 struct rpc_task_setup task_setup = {
8345 .rpc_client = server->client,
8346 .rpc_message = &msg,
8347 .callback_ops = &nfs41_free_stateid_ops,
8348 .flags = RPC_TASK_ASYNC,
8349 };
8350 struct nfs_free_stateid_data *data;
8351
8352 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8353 &task_setup.rpc_client, &msg);
8354
8355 dprintk("NFS call free_stateid %p\n", stateid);
8356 data = kmalloc(sizeof(*data), GFP_NOFS);
8357 if (!data)
8358 return ERR_PTR(-ENOMEM);
8359 data->server = server;
8360 nfs4_stateid_copy(&data->args.stateid, stateid);
8361
8362 task_setup.callback_data = data;
8363
8364 msg.rpc_argp = &data->args;
8365 msg.rpc_resp = &data->res;
8366 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8367 if (privileged)
8368 nfs4_set_sequence_privileged(&data->args.seq_args);
8369
8370 return rpc_run_task(&task_setup);
8371 }
8372
8373 /**
8374 * nfs41_free_stateid - perform a FREE_STATEID operation
8375 *
8376 * @server: server / transport on which to perform the operation
8377 * @stateid: state ID to release
8378 * @cred: credential
8379 *
8380 * Returns NFS_OK if the server freed "stateid". Otherwise a
8381 * negative NFS4ERR value is returned.
8382 */
8383 static int nfs41_free_stateid(struct nfs_server *server,
8384 nfs4_stateid *stateid,
8385 struct rpc_cred *cred)
8386 {
8387 struct rpc_task *task;
8388 int ret;
8389
8390 task = _nfs41_free_stateid(server, stateid, cred, true);
8391 if (IS_ERR(task))
8392 return PTR_ERR(task);
8393 ret = rpc_wait_for_completion_task(task);
8394 if (!ret)
8395 ret = task->tk_status;
8396 rpc_put_task(task);
8397 return ret;
8398 }
8399
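/* NFSv4.1 lock state release: send an asynchronous FREE_STATEID using the
 * lock's open owner credential, free the local lock state, and do not wait
 * for (or check) the RPC result. */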
8400 static void
8401 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8402 {
8403 struct rpc_task *task;
8404 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8405
8406 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8407 nfs4_free_lock_state(server, lsp);
8408 if (IS_ERR(task))
8409 return;
8410 rpc_put_task(task);
8411 }
8412
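/* NFSv4.1 stateid comparison: the "other" fields must match exactly, and a
 * seqid of zero on either side is treated as a wild card (RFC 5661 gives
 * the zero seqid the special meaning of "the current stateid"). */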
8413 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8414 const nfs4_stateid *s2)
8415 {
8416 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8417 return false;
8418
8419 if (s1->seqid == s2->seqid)
8420 return true;
8421 if (s1->seqid == 0 || s2->seqid == 0)
8422 return true;
8423
8424 return false;
8425 }
8426
8427 #endif /* CONFIG_NFS_V4_1 */
8428
8429 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8430 const nfs4_stateid *s2)
8431 {
8432 return nfs4_stateid_match(s1, s2);
8433 }
8434
8435
8436 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8437 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8438 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8439 .recover_open = nfs4_open_reclaim,
8440 .recover_lock = nfs4_lock_reclaim,
8441 .establish_clid = nfs4_init_clientid,
8442 .detect_trunking = nfs40_discover_server_trunking,
8443 };
8444
8445 #if defined(CONFIG_NFS_V4_1)
8446 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8447 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8448 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8449 .recover_open = nfs4_open_reclaim,
8450 .recover_lock = nfs4_lock_reclaim,
8451 .establish_clid = nfs41_init_clientid,
8452 .reclaim_complete = nfs41_proc_reclaim_complete,
8453 .detect_trunking = nfs41_discover_server_trunking,
8454 };
8455 #endif /* CONFIG_NFS_V4_1 */
8456
8457 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8458 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8459 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8460 .recover_open = nfs40_open_expired,
8461 .recover_lock = nfs4_lock_expired,
8462 .establish_clid = nfs4_init_clientid,
8463 };
8464
8465 #if defined(CONFIG_NFS_V4_1)
8466 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8467 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8468 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8469 .recover_open = nfs41_open_expired,
8470 .recover_lock = nfs41_lock_expired,
8471 .establish_clid = nfs41_init_clientid,
8472 };
8473 #endif /* CONFIG_NFS_V4_1 */
8474
8475 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8476 .sched_state_renewal = nfs4_proc_async_renew,
8477 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8478 .renew_lease = nfs4_proc_renew,
8479 };
8480
8481 #if defined(CONFIG_NFS_V4_1)
8482 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8483 .sched_state_renewal = nfs41_proc_async_sequence,
8484 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8485 .renew_lease = nfs4_proc_sequence,
8486 };
8487 #endif
8488
8489 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8490 .get_locations = _nfs40_proc_get_locations,
8491 .fsid_present = _nfs40_proc_fsid_present,
8492 };
8493
8494 #if defined(CONFIG_NFS_V4_1)
8495 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8496 .get_locations = _nfs41_proc_get_locations,
8497 .fsid_present = _nfs41_proc_fsid_present,
8498 };
8499 #endif /* CONFIG_NFS_V4_1 */
8500
8501 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8502 .minor_version = 0,
8503 .init_caps = NFS_CAP_READDIRPLUS
8504 | NFS_CAP_ATOMIC_OPEN
8505 | NFS_CAP_CHANGE_ATTR
8506 | NFS_CAP_POSIX_LOCK,
8507 .init_client = nfs40_init_client,
8508 .shutdown_client = nfs40_shutdown_client,
8509 .match_stateid = nfs4_match_stateid,
8510 .find_root_sec = nfs4_find_root_sec,
8511 .free_lock_state = nfs4_release_lockowner,
8512 .alloc_seqid = nfs_alloc_seqid,
8513 .call_sync_ops = &nfs40_call_sync_ops,
8514 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8515 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8516 .state_renewal_ops = &nfs40_state_renewal_ops,
8517 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8518 };
8519
8520 #if defined(CONFIG_NFS_V4_1)
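/* NFSv4.1 sessions already provide exactly-once ordering via the slot
 * table, so the per-owner open/lock seqids of v4.0 are not needed; the
 * v4.1+ minor versions therefore never allocate an nfs_seqid. */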
8521 static struct nfs_seqid *
8522 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8523 {
8524 return NULL;
8525 }
8526
8527 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8528 .minor_version = 1,
8529 .init_caps = NFS_CAP_READDIRPLUS
8530 | NFS_CAP_ATOMIC_OPEN
8531 | NFS_CAP_CHANGE_ATTR
8532 | NFS_CAP_POSIX_LOCK
8533 | NFS_CAP_STATEID_NFSV41
8534 | NFS_CAP_ATOMIC_OPEN_V1,
8535 .init_client = nfs41_init_client,
8536 .shutdown_client = nfs41_shutdown_client,
8537 .match_stateid = nfs41_match_stateid,
8538 .find_root_sec = nfs41_find_root_sec,
8539 .free_lock_state = nfs41_free_lock_state,
8540 .alloc_seqid = nfs_alloc_no_seqid,
8541 .call_sync_ops = &nfs41_call_sync_ops,
8542 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8543 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8544 .state_renewal_ops = &nfs41_state_renewal_ops,
8545 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8546 };
8547 #endif
8548
8549 #if defined(CONFIG_NFS_V4_2)
8550 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8551 .minor_version = 2,
8552 .init_caps = NFS_CAP_READDIRPLUS
8553 | NFS_CAP_ATOMIC_OPEN
8554 | NFS_CAP_CHANGE_ATTR
8555 | NFS_CAP_POSIX_LOCK
8556 | NFS_CAP_STATEID_NFSV41
8557 | NFS_CAP_ATOMIC_OPEN_V1
8558 | NFS_CAP_ALLOCATE
8559 | NFS_CAP_DEALLOCATE
8560 | NFS_CAP_SEEK,
8561 .init_client = nfs41_init_client,
8562 .shutdown_client = nfs41_shutdown_client,
8563 .match_stateid = nfs41_match_stateid,
8564 .find_root_sec = nfs41_find_root_sec,
8565 .free_lock_state = nfs41_free_lock_state,
8566 .call_sync_ops = &nfs41_call_sync_ops,
8567 .alloc_seqid = nfs_alloc_no_seqid,
8568 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8569 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8570 .state_renewal_ops = &nfs41_state_renewal_ops,
8571 };
8572 #endif
8573
8574 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8575 [0] = &nfs_v4_0_minor_ops,
8576 #if defined(CONFIG_NFS_V4_1)
8577 [1] = &nfs_v4_1_minor_ops,
8578 #endif
8579 #if defined(CONFIG_NFS_V4_2)
8580 [2] = &nfs_v4_2_minor_ops,
8581 #endif
8582 };
8583
8584 static const struct inode_operations nfs4_dir_inode_operations = {
8585 .create = nfs_create,
8586 .lookup = nfs_lookup,
8587 .atomic_open = nfs_atomic_open,
8588 .link = nfs_link,
8589 .unlink = nfs_unlink,
8590 .symlink = nfs_symlink,
8591 .mkdir = nfs_mkdir,
8592 .rmdir = nfs_rmdir,
8593 .mknod = nfs_mknod,
8594 .rename = nfs_rename,
8595 .permission = nfs_permission,
8596 .getattr = nfs_getattr,
8597 .setattr = nfs_setattr,
8598 .getxattr = generic_getxattr,
8599 .setxattr = generic_setxattr,
8600 .listxattr = generic_listxattr,
8601 .removexattr = generic_removexattr,
8602 };
8603
8604 static const struct inode_operations nfs4_file_inode_operations = {
8605 .permission = nfs_permission,
8606 .getattr = nfs_getattr,
8607 .setattr = nfs_setattr,
8608 .getxattr = generic_getxattr,
8609 .setxattr = generic_setxattr,
8610 .listxattr = generic_listxattr,
8611 .removexattr = generic_removexattr,
8612 };
8613
8614 const struct nfs_rpc_ops nfs_v4_clientops = {
8615 .version = 4, /* protocol version */
8616 .dentry_ops = &nfs4_dentry_operations,
8617 .dir_inode_ops = &nfs4_dir_inode_operations,
8618 .file_inode_ops = &nfs4_file_inode_operations,
8619 .file_ops = &nfs4_file_operations,
8620 .getroot = nfs4_proc_get_root,
8621 .submount = nfs4_submount,
8622 .try_mount = nfs4_try_mount,
8623 .getattr = nfs4_proc_getattr,
8624 .setattr = nfs4_proc_setattr,
8625 .lookup = nfs4_proc_lookup,
8626 .access = nfs4_proc_access,
8627 .readlink = nfs4_proc_readlink,
8628 .create = nfs4_proc_create,
8629 .remove = nfs4_proc_remove,
8630 .unlink_setup = nfs4_proc_unlink_setup,
8631 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8632 .unlink_done = nfs4_proc_unlink_done,
8633 .rename_setup = nfs4_proc_rename_setup,
8634 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8635 .rename_done = nfs4_proc_rename_done,
8636 .link = nfs4_proc_link,
8637 .symlink = nfs4_proc_symlink,
8638 .mkdir = nfs4_proc_mkdir,
8639 .rmdir = nfs4_proc_remove,
8640 .readdir = nfs4_proc_readdir,
8641 .mknod = nfs4_proc_mknod,
8642 .statfs = nfs4_proc_statfs,
8643 .fsinfo = nfs4_proc_fsinfo,
8644 .pathconf = nfs4_proc_pathconf,
8645 .set_capabilities = nfs4_server_capabilities,
8646 .decode_dirent = nfs4_decode_dirent,
8647 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8648 .read_setup = nfs4_proc_read_setup,
8649 .read_done = nfs4_read_done,
8650 .write_setup = nfs4_proc_write_setup,
8651 .write_done = nfs4_write_done,
8652 .commit_setup = nfs4_proc_commit_setup,
8653 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8654 .commit_done = nfs4_commit_done,
8655 .lock = nfs4_proc_lock,
8656 .clear_acl_cache = nfs4_zap_acl_attr,
8657 .close_context = nfs4_close_context,
8658 .open_context = nfs4_atomic_open,
8659 .have_delegation = nfs4_have_delegation,
8660 .return_delegation = nfs4_inode_return_delegation,
8661 .alloc_client = nfs4_alloc_client,
8662 .init_client = nfs4_init_client,
8663 .free_client = nfs4_free_client,
8664 .create_server = nfs4_create_server,
8665 .clone_server = nfs_clone_server,
8666 };
8667
8668 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8669 .prefix = XATTR_NAME_NFSV4_ACL,
8670 .list = nfs4_xattr_list_nfs4_acl,
8671 .get = nfs4_xattr_get_nfs4_acl,
8672 .set = nfs4_xattr_set_nfs4_acl,
8673 };
8674
8675 const struct xattr_handler *nfs4_xattr_handlers[] = {
8676 &nfs4_xattr_nfs4_acl_handler,
8677 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8678 &nfs4_xattr_nfs4_label_handler,
8679 #endif
8680 NULL
8681 };
8682
8683 /*
8684 * Local variables:
8685 * c-basic-offset: 8
8686 * End:
8687 */