1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/nfs_idmap.h>
55 #include <linux/sunrpc/bc_xprt.h>
56 #include <linux/xattr.h>
57 #include <linux/utsname.h>
58 #include <linux/freezer.h>
59
60 #include "nfs4_fs.h"
61 #include "delegation.h"
62 #include "internal.h"
63 #include "iostat.h"
64 #include "callback.h"
65 #include "pnfs.h"
66 #include "netns.h"
67
68 #define NFSDBG_FACILITY NFSDBG_PROC
69
70 #define NFS4_POLL_RETRY_MIN (HZ/10)
71 #define NFS4_POLL_RETRY_MAX (15*HZ)
72
73 #define NFS4_MAX_LOOP_ON_RECOVER (10)
74
75 struct nfs4_opendata;
76 static int _nfs4_proc_open(struct nfs4_opendata *data);
77 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
78 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
79 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
80 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
81 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
82 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
83 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
84 struct nfs_fattr *fattr, struct iattr *sattr,
85 struct nfs4_state *state);
86 #ifdef CONFIG_NFS_V4_1
87 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
88 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
89 #endif
90 /* Prevent leaks of NFSv4 errors into userland */
91 static int nfs4_map_errors(int err)
92 {
93 if (err >= -1000)
94 return err;
95 switch (err) {
96 case -NFS4ERR_RESOURCE:
97 return -EREMOTEIO;
98 case -NFS4ERR_WRONGSEC:
99 return -EPERM;
100 case -NFS4ERR_BADOWNER:
101 case -NFS4ERR_BADNAME:
102 return -EINVAL;
103 case -NFS4ERR_SHARE_DENIED:
104 return -EACCES;
105 case -NFS4ERR_MINOR_VERS_MISMATCH:
106 return -EPROTONOSUPPORT;
107 case -NFS4ERR_ACCESS:
108 return -EACCES;
109 default:
110 dprintk("%s could not handle NFSv4 error %d\n",
111 __func__, -err);
112 break;
113 }
114 return -EIO;
115 }
116
117 /*
118 * This is our standard bitmap for GETATTR requests.
119 */
120 const u32 nfs4_fattr_bitmap[3] = {
121 FATTR4_WORD0_TYPE
122 | FATTR4_WORD0_CHANGE
123 | FATTR4_WORD0_SIZE
124 | FATTR4_WORD0_FSID
125 | FATTR4_WORD0_FILEID,
126 FATTR4_WORD1_MODE
127 | FATTR4_WORD1_NUMLINKS
128 | FATTR4_WORD1_OWNER
129 | FATTR4_WORD1_OWNER_GROUP
130 | FATTR4_WORD1_RAWDEV
131 | FATTR4_WORD1_SPACE_USED
132 | FATTR4_WORD1_TIME_ACCESS
133 | FATTR4_WORD1_TIME_METADATA
134 | FATTR4_WORD1_TIME_MODIFY
135 };
136
137 static const u32 nfs4_pnfs_open_bitmap[3] = {
138 FATTR4_WORD0_TYPE
139 | FATTR4_WORD0_CHANGE
140 | FATTR4_WORD0_SIZE
141 | FATTR4_WORD0_FSID
142 | FATTR4_WORD0_FILEID,
143 FATTR4_WORD1_MODE
144 | FATTR4_WORD1_NUMLINKS
145 | FATTR4_WORD1_OWNER
146 | FATTR4_WORD1_OWNER_GROUP
147 | FATTR4_WORD1_RAWDEV
148 | FATTR4_WORD1_SPACE_USED
149 | FATTR4_WORD1_TIME_ACCESS
150 | FATTR4_WORD1_TIME_METADATA
151 | FATTR4_WORD1_TIME_MODIFY,
152 FATTR4_WORD2_MDSTHRESHOLD
153 };
154
155 static const u32 nfs4_open_noattr_bitmap[3] = {
156 FATTR4_WORD0_TYPE
157 | FATTR4_WORD0_CHANGE
158 | FATTR4_WORD0_FILEID,
159 };
160
161 const u32 nfs4_statfs_bitmap[2] = {
162 FATTR4_WORD0_FILES_AVAIL
163 | FATTR4_WORD0_FILES_FREE
164 | FATTR4_WORD0_FILES_TOTAL,
165 FATTR4_WORD1_SPACE_AVAIL
166 | FATTR4_WORD1_SPACE_FREE
167 | FATTR4_WORD1_SPACE_TOTAL
168 };
169
170 const u32 nfs4_pathconf_bitmap[2] = {
171 FATTR4_WORD0_MAXLINK
172 | FATTR4_WORD0_MAXNAME,
173 0
174 };
175
176 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
177 | FATTR4_WORD0_MAXREAD
178 | FATTR4_WORD0_MAXWRITE
179 | FATTR4_WORD0_LEASE_TIME,
180 FATTR4_WORD1_TIME_DELTA
181 | FATTR4_WORD1_FS_LAYOUT_TYPES,
182 FATTR4_WORD2_LAYOUT_BLKSIZE
183 };
184
185 const u32 nfs4_fs_locations_bitmap[2] = {
186 FATTR4_WORD0_TYPE
187 | FATTR4_WORD0_CHANGE
188 | FATTR4_WORD0_SIZE
189 | FATTR4_WORD0_FSID
190 | FATTR4_WORD0_FILEID
191 | FATTR4_WORD0_FS_LOCATIONS,
192 FATTR4_WORD1_MODE
193 | FATTR4_WORD1_NUMLINKS
194 | FATTR4_WORD1_OWNER
195 | FATTR4_WORD1_OWNER_GROUP
196 | FATTR4_WORD1_RAWDEV
197 | FATTR4_WORD1_SPACE_USED
198 | FATTR4_WORD1_TIME_ACCESS
199 | FATTR4_WORD1_TIME_METADATA
200 | FATTR4_WORD1_TIME_MODIFY
201 | FATTR4_WORD1_MOUNTED_ON_FILEID
202 };
203
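/*
 * Set up an NFSv4 READDIR request. NFSv4 servers never return '.' and
 * '..', so for cookie 0 both entries (and for cookie 1 just '..') are
 * fabricated locally in the first result page; see the comment below.
 */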
204 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
205 struct nfs4_readdir_arg *readdir)
206 {
207 __be32 *start, *p;
208
209 if (cookie > 2) {
210 readdir->cookie = cookie;
211 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
212 return;
213 }
214
215 readdir->cookie = 0;
216 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
217 if (cookie == 2)
218 return;
219
220 /*
221 * NFSv4 servers do not return entries for '.' and '..'
222 * Therefore, we fake these entries here. We let '.'
223 * have cookie 0 and '..' have cookie 1. Note that
224 * when talking to the server, we always send cookie 0
225 * instead of 1 or 2.
226 */
227 start = p = kmap_atomic(*readdir->pages);
228
229 if (cookie == 0) {
230 *p++ = xdr_one; /* next */
231 *p++ = xdr_zero; /* cookie, first word */
232 *p++ = xdr_one; /* cookie, second word */
233 *p++ = xdr_one; /* entry len */
234 memcpy(p, ".\0\0\0", 4); /* entry */
235 p++;
236 *p++ = xdr_one; /* bitmap length */
237 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
238 *p++ = htonl(8); /* attribute buffer length */
239 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
240 }
241
242 *p++ = xdr_one; /* next */
243 *p++ = xdr_zero; /* cookie, first word */
244 *p++ = xdr_two; /* cookie, second word */
245 *p++ = xdr_two; /* entry len */
246 memcpy(p, "..\0\0", 4); /* entry */
247 p++;
248 *p++ = xdr_one; /* bitmap length */
249 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
250 *p++ = htonl(8); /* attribute buffer length */
251 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
252
253 readdir->pgbase = (char *)p - (char *)start;
254 readdir->count -= readdir->pgbase;
255 kunmap_atomic(start);
256 }
257
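/*
 * Wait (killably) for the state manager to finish running, then report
 * any permanent client initialisation error via cl_cons_state.
 */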
258 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
259 {
260 int res;
261
262 might_sleep();
263
264 res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
265 nfs_wait_bit_killable, TASK_KILLABLE);
266 if (res)
267 return res;
268
269 if (clp->cl_cons_state < 0)
270 return clp->cl_cons_state;
271 return 0;
272 }
273
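/*
 * Sleep before retrying an NFSv4 operation: the timeout is clamped to
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX], the sleep is freezable and
 * killable, and the timeout is doubled for the next attempt.
 */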
274 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
275 {
276 int res = 0;
277
278 might_sleep();
279
280 if (*timeout <= 0)
281 *timeout = NFS4_POLL_RETRY_MIN;
282 if (*timeout > NFS4_POLL_RETRY_MAX)
283 *timeout = NFS4_POLL_RETRY_MAX;
284 freezable_schedule_timeout_killable(*timeout);
285 if (fatal_signal_pending(current))
286 res = -ERESTARTSYS;
287 *timeout <<= 1;
288 return res;
289 }
290
291 /* This is the error handling routine for processes that are allowed
292 * to sleep.
293 */
294 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
295 {
296 struct nfs_client *clp = server->nfs_client;
297 struct nfs4_state *state = exception->state;
298 struct inode *inode = exception->inode;
299 int ret = errorcode;
300
301 exception->retry = 0;
302 switch(errorcode) {
303 case 0:
304 return 0;
305 case -NFS4ERR_OPENMODE:
306 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
307 nfs4_inode_return_delegation(inode);
308 exception->retry = 1;
309 return 0;
310 }
311 if (state == NULL)
312 break;
313 nfs4_schedule_stateid_recovery(server, state);
314 goto wait_on_recovery;
315 case -NFS4ERR_DELEG_REVOKED:
316 case -NFS4ERR_ADMIN_REVOKED:
317 case -NFS4ERR_BAD_STATEID:
318 if (state == NULL)
319 break;
320 nfs_remove_bad_delegation(state->inode);
321 nfs4_schedule_stateid_recovery(server, state);
322 goto wait_on_recovery;
323 case -NFS4ERR_EXPIRED:
324 if (state != NULL)
325 nfs4_schedule_stateid_recovery(server, state);
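                        /* Fall through: an expired stateid also means the lease needs recovery */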
326 case -NFS4ERR_STALE_STATEID:
327 case -NFS4ERR_STALE_CLIENTID:
328 nfs4_schedule_lease_recovery(clp);
329 goto wait_on_recovery;
330 #if defined(CONFIG_NFS_V4_1)
331 case -NFS4ERR_BADSESSION:
332 case -NFS4ERR_BADSLOT:
333 case -NFS4ERR_BAD_HIGH_SLOT:
334 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
335 case -NFS4ERR_DEADSESSION:
336 case -NFS4ERR_SEQ_FALSE_RETRY:
337 case -NFS4ERR_SEQ_MISORDERED:
338 dprintk("%s ERROR: %d Reset session\n", __func__,
339 errorcode);
340 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
341 goto wait_on_recovery;
342 #endif /* defined(CONFIG_NFS_V4_1) */
343 case -NFS4ERR_FILE_OPEN:
344 if (exception->timeout > HZ) {
345 /* We have retried a decent amount, time to
346 * fail
347 */
348 ret = -EBUSY;
349 break;
350 }
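                        /* Fall through: not yet timed out, so delay and retry */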
351 case -NFS4ERR_GRACE:
352 case -NFS4ERR_DELAY:
353 case -EKEYEXPIRED:
354 ret = nfs4_delay(server->client, &exception->timeout);
355 if (ret != 0)
356 break;
357 case -NFS4ERR_RETRY_UNCACHED_REP:
358 case -NFS4ERR_OLD_STATEID:
359 exception->retry = 1;
360 break;
361 case -NFS4ERR_BADOWNER:
362 /* The following works around a Linux server bug! */
363 case -NFS4ERR_BADNAME:
364 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
365 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
366 exception->retry = 1;
367 printk(KERN_WARNING "NFS: v4 server %s "
368 "does not accept raw "
369 "uid/gids. "
370 "Reenabling the idmapper.\n",
371 server->nfs_client->cl_hostname);
372 }
373 }
374 /* We failed to handle the error */
375 return nfs4_map_errors(ret);
376 wait_on_recovery:
377 ret = nfs4_wait_clnt_recover(clp);
378 if (ret == 0)
379 exception->retry = 1;
380 return ret;
381 }
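
/*
 * Typical calling pattern for nfs4_handle_exception() (see for instance
 * nfs4_open_delegation_recall() below; _nfs4_do_something() stands in for
 * any per-operation worker):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_do_something(server, ...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 */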
382
383
384 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
385 {
386 spin_lock(&clp->cl_lock);
387 if (time_before(clp->cl_last_renewal,timestamp))
388 clp->cl_last_renewal = timestamp;
389 spin_unlock(&clp->cl_lock);
390 }
391
392 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
393 {
394 do_renew_lease(server->nfs_client, timestamp);
395 }
396
397 #if defined(CONFIG_NFS_V4_1)
398
399 /*
400 * nfs4_free_slot - free a slot and efficiently update slot table.
401 *
402 * freeing a slot is trivially done by clearing its respective bit
403 * in the bitmap.
404 * If the freed slotid equals highest_used_slotid we want to update it
405 * so that the server can size down the slot table if needed;
406 * otherwise we know that the highest_used_slotid is still in use.
407 * When updating highest_used_slotid there may be "holes" in the bitmap
408 * so we need to scan down from highest_used_slotid to 0 looking for the now
409 * highest slotid in use.
410 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
411 *
412 * Must be called while holding tbl->slot_tbl_lock
413 */
414 static void
415 nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
416 {
417 /* clear used bit in bitmap */
418 __clear_bit(slotid, tbl->used_slots);
419
420 /* update highest_used_slotid when it is freed */
421 if (slotid == tbl->highest_used_slotid) {
422 slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
423 if (slotid < tbl->max_slots)
424 tbl->highest_used_slotid = slotid;
425 else
426 tbl->highest_used_slotid = NFS4_NO_SLOT;
427 }
428 dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
429 slotid, tbl->highest_used_slotid);
430 }
431
432 bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
433 {
434 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
435 return true;
436 }
437
438 /*
439 * Signal state manager thread if session fore channel is drained
440 */
441 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
442 {
443 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
444 rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
445 nfs4_set_task_privileged, NULL);
446 return;
447 }
448
449 if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
450 return;
451
452 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
453 complete(&ses->fc_slot_table.complete);
454 }
455
456 /*
457 * Signal state manager thread if session back channel is drained
458 */
459 void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
460 {
461 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
462 ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
463 return;
464 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
465 complete(&ses->bc_slot_table.complete);
466 }
467
468 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
469 {
470 struct nfs4_slot_table *tbl;
471
472 tbl = &res->sr_session->fc_slot_table;
473 if (!res->sr_slot) {
474 /* just wake up the next waiter, since
475 * we may not have consumed a slot after all */
476 dprintk("%s: No slot\n", __func__);
477 return;
478 }
479
480 spin_lock(&tbl->slot_tbl_lock);
481 nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
482 nfs4_check_drain_fc_complete(res->sr_session);
483 spin_unlock(&tbl->slot_tbl_lock);
484 res->sr_slot = NULL;
485 }
486
487 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
488 {
489 unsigned long timestamp;
490 struct nfs_client *clp;
491
492 /*
493 * sr_status remains 1 if an RPC level error occurred. The server
494 * may or may not have processed the sequence operation.
495 * Proceed as if the server received and processed the sequence
496 * operation.
497 */
498 if (res->sr_status == 1)
499 res->sr_status = NFS_OK;
500
501 /* don't increment the sequence number if the task wasn't sent */
502 if (!RPC_WAS_SENT(task))
503 goto out;
504
505 /* Check the SEQUENCE operation status */
506 switch (res->sr_status) {
507 case 0:
508 /* Update the slot's sequence and clientid lease timer */
509 ++res->sr_slot->seq_nr;
510 timestamp = res->sr_renewal_time;
511 clp = res->sr_session->clp;
512 do_renew_lease(clp, timestamp);
513 /* Check sequence flags */
514 if (res->sr_status_flags != 0)
515 nfs4_schedule_lease_recovery(clp);
516 break;
517 case -NFS4ERR_DELAY:
518 /* The server detected a resend of the RPC call and
519 * returned NFS4ERR_DELAY as per Section 2.10.6.2
520 * of RFC5661.
521 */
522 dprintk("%s: slot=%td seq=%d: Operation in progress\n",
523 __func__,
524 res->sr_slot - res->sr_session->fc_slot_table.slots,
525 res->sr_slot->seq_nr);
526 goto out_retry;
527 default:
528 /* Just update the slot sequence no. */
529 ++res->sr_slot->seq_nr;
530 }
531 out:
532 /* The session may be reset by one of the error handlers. */
533 dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
534 nfs41_sequence_free_slot(res);
535 return 1;
536 out_retry:
537 if (!rpc_restart_call(task))
538 goto out;
539 rpc_delay(task, NFS4_POLL_RETRY_MAX);
540 return 0;
541 }
542
543 static int nfs4_sequence_done(struct rpc_task *task,
544 struct nfs4_sequence_res *res)
545 {
546 if (res->sr_session == NULL)
547 return 1;
548 return nfs41_sequence_done(task, res);
549 }
550
551 /*
552 * nfs4_find_slot - efficiently look for a free slot
553 *
554 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
555 * If found, we mark the slot as used, update the highest_used_slotid,
556 * and set up the sequence operation args accordingly.
557 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
558 *
559 * Note: must be called while holding the slot_tbl_lock.
560 */
561 static u32
562 nfs4_find_slot(struct nfs4_slot_table *tbl)
563 {
564 u32 slotid;
565 u32 ret_id = NFS4_NO_SLOT;
566
567 dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
568 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
569 tbl->max_slots);
570 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
571 if (slotid >= tbl->max_slots)
572 goto out;
573 __set_bit(slotid, tbl->used_slots);
574 if (slotid > tbl->highest_used_slotid ||
575 tbl->highest_used_slotid == NFS4_NO_SLOT)
576 tbl->highest_used_slotid = slotid;
577 ret_id = slotid;
578 out:
579 dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
580 __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
581 return ret_id;
582 }
583
584 static void nfs41_init_sequence(struct nfs4_sequence_args *args,
585 struct nfs4_sequence_res *res, int cache_reply)
586 {
587 args->sa_session = NULL;
588 args->sa_cache_this = 0;
589 if (cache_reply)
590 args->sa_cache_this = 1;
591 res->sr_session = NULL;
592 res->sr_slot = NULL;
593 }
594
595 int nfs41_setup_sequence(struct nfs4_session *session,
596 struct nfs4_sequence_args *args,
597 struct nfs4_sequence_res *res,
598 struct rpc_task *task)
599 {
600 struct nfs4_slot *slot;
601 struct nfs4_slot_table *tbl;
602 u32 slotid;
603
604 dprintk("--> %s\n", __func__);
605 /* slot already allocated? */
606 if (res->sr_slot != NULL)
607 return 0;
608
609 tbl = &session->fc_slot_table;
610
611 spin_lock(&tbl->slot_tbl_lock);
612 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
613 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
614 /* The state manager will wait until the slot table is empty */
615 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
616 spin_unlock(&tbl->slot_tbl_lock);
617 dprintk("%s session is draining\n", __func__);
618 return -EAGAIN;
619 }
620
621 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
622 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
623 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
624 spin_unlock(&tbl->slot_tbl_lock);
625 dprintk("%s enforce FIFO order\n", __func__);
626 return -EAGAIN;
627 }
628
629 slotid = nfs4_find_slot(tbl);
630 if (slotid == NFS4_NO_SLOT) {
631 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
632 spin_unlock(&tbl->slot_tbl_lock);
633 dprintk("<-- %s: no free slots\n", __func__);
634 return -EAGAIN;
635 }
636 spin_unlock(&tbl->slot_tbl_lock);
637
638 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
639 slot = tbl->slots + slotid;
640 args->sa_session = session;
641 args->sa_slotid = slotid;
642
643 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
644
645 res->sr_session = session;
646 res->sr_slot = slot;
647 res->sr_renewal_time = jiffies;
648 res->sr_status_flags = 0;
649 /*
650 * sr_status is only set in decode_sequence, and so will remain
651 * set to 1 if an rpc level failure occurs.
652 */
653 res->sr_status = 1;
654 return 0;
655 }
656 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
657
658 int nfs4_setup_sequence(const struct nfs_server *server,
659 struct nfs4_sequence_args *args,
660 struct nfs4_sequence_res *res,
661 struct rpc_task *task)
662 {
663 struct nfs4_session *session = nfs4_get_session(server);
664 int ret = 0;
665
666 if (session == NULL)
667 goto out;
668
669 dprintk("--> %s clp %p session %p sr_slot %td\n",
670 __func__, session->clp, session, res->sr_slot ?
671 res->sr_slot - session->fc_slot_table.slots : -1);
672
673 ret = nfs41_setup_sequence(session, args, res, task);
674 out:
675 dprintk("<-- %s status=%d\n", __func__, ret);
676 return ret;
677 }
678
679 struct nfs41_call_sync_data {
680 const struct nfs_server *seq_server;
681 struct nfs4_sequence_args *seq_args;
682 struct nfs4_sequence_res *seq_res;
683 };
684
685 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
686 {
687 struct nfs41_call_sync_data *data = calldata;
688
689 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
690
691 if (nfs4_setup_sequence(data->seq_server, data->seq_args,
692 data->seq_res, task))
693 return;
694 rpc_call_start(task);
695 }
696
697 static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
698 {
699 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
700 nfs41_call_sync_prepare(task, calldata);
701 }
702
703 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
704 {
705 struct nfs41_call_sync_data *data = calldata;
706
707 nfs41_sequence_done(task, data->seq_res);
708 }
709
710 static const struct rpc_call_ops nfs41_call_sync_ops = {
711 .rpc_call_prepare = nfs41_call_sync_prepare,
712 .rpc_call_done = nfs41_call_sync_done,
713 };
714
715 static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
716 .rpc_call_prepare = nfs41_call_priv_sync_prepare,
717 .rpc_call_done = nfs41_call_sync_done,
718 };
719
720 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
721 struct nfs_server *server,
722 struct rpc_message *msg,
723 struct nfs4_sequence_args *args,
724 struct nfs4_sequence_res *res,
725 int privileged)
726 {
727 int ret;
728 struct rpc_task *task;
729 struct nfs41_call_sync_data data = {
730 .seq_server = server,
731 .seq_args = args,
732 .seq_res = res,
733 };
734 struct rpc_task_setup task_setup = {
735 .rpc_client = clnt,
736 .rpc_message = msg,
737 .callback_ops = &nfs41_call_sync_ops,
738 .callback_data = &data
739 };
740
741 if (privileged)
742 task_setup.callback_ops = &nfs41_call_priv_sync_ops;
743 task = rpc_run_task(&task_setup);
744 if (IS_ERR(task))
745 ret = PTR_ERR(task);
746 else {
747 ret = task->tk_status;
748 rpc_put_task(task);
749 }
750 return ret;
751 }
752
753 int _nfs4_call_sync_session(struct rpc_clnt *clnt,
754 struct nfs_server *server,
755 struct rpc_message *msg,
756 struct nfs4_sequence_args *args,
757 struct nfs4_sequence_res *res,
758 int cache_reply)
759 {
760 nfs41_init_sequence(args, res, cache_reply);
761 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
762 }
763
764 #else
765 static inline
766 void nfs41_init_sequence(struct nfs4_sequence_args *args,
767 struct nfs4_sequence_res *res, int cache_reply)
768 {
769 }
770
771 static int nfs4_sequence_done(struct rpc_task *task,
772 struct nfs4_sequence_res *res)
773 {
774 return 1;
775 }
776 #endif /* CONFIG_NFS_V4_1 */
777
778 int _nfs4_call_sync(struct rpc_clnt *clnt,
779 struct nfs_server *server,
780 struct rpc_message *msg,
781 struct nfs4_sequence_args *args,
782 struct nfs4_sequence_res *res,
783 int cache_reply)
784 {
785 nfs41_init_sequence(args, res, cache_reply);
786 return rpc_call_sync(clnt, msg, 0);
787 }
788
789 static inline
790 int nfs4_call_sync(struct rpc_clnt *clnt,
791 struct nfs_server *server,
792 struct rpc_message *msg,
793 struct nfs4_sequence_args *args,
794 struct nfs4_sequence_res *res,
795 int cache_reply)
796 {
797 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
798 args, res, cache_reply);
799 }
800
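/*
 * Apply the directory change info returned by the server: invalidate the
 * cached attributes and readdir data, force a lookup revalidation if the
 * change was not atomic or the pre-operation change attribute does not
 * match our cached one, and record the post-operation change attribute.
 */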
801 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
802 {
803 struct nfs_inode *nfsi = NFS_I(dir);
804
805 spin_lock(&dir->i_lock);
806 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
807 if (!cinfo->atomic || cinfo->before != dir->i_version)
808 nfs_force_lookup_revalidate(dir);
809 dir->i_version = cinfo->after;
810 spin_unlock(&dir->i_lock);
811 }
812
813 struct nfs4_opendata {
814 struct kref kref;
815 struct nfs_openargs o_arg;
816 struct nfs_openres o_res;
817 struct nfs_open_confirmargs c_arg;
818 struct nfs_open_confirmres c_res;
819 struct nfs4_string owner_name;
820 struct nfs4_string group_name;
821 struct nfs_fattr f_attr;
822 struct dentry *dir;
823 struct dentry *dentry;
824 struct nfs4_state_owner *owner;
825 struct nfs4_state *state;
826 struct iattr attrs;
827 unsigned long timestamp;
828 unsigned int rpc_done : 1;
829 int rpc_status;
830 int cancelled;
831 };
832
833
834 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
835 {
836 p->o_res.f_attr = &p->f_attr;
837 p->o_res.seqid = p->o_arg.seqid;
838 p->c_res.seqid = p->c_arg.seqid;
839 p->o_res.server = p->o_arg.server;
840 p->o_res.access_request = p->o_arg.access;
841 nfs_fattr_init(&p->f_attr);
842 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
843 }
844
845 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
846 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
847 const struct iattr *attrs,
848 gfp_t gfp_mask)
849 {
850 struct dentry *parent = dget_parent(dentry);
851 struct inode *dir = parent->d_inode;
852 struct nfs_server *server = NFS_SERVER(dir);
853 struct nfs4_opendata *p;
854
855 p = kzalloc(sizeof(*p), gfp_mask);
856 if (p == NULL)
857 goto err;
858 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
859 if (p->o_arg.seqid == NULL)
860 goto err_free;
861 nfs_sb_active(dentry->d_sb);
862 p->dentry = dget(dentry);
863 p->dir = parent;
864 p->owner = sp;
865 atomic_inc(&sp->so_count);
866 p->o_arg.fh = NFS_FH(dir);
867 p->o_arg.open_flags = flags;
868 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
869 /* don't put an ACCESS op in the OPEN compound if O_EXCL, because ACCESS
870 * will return permission denied for all bits until close */
871 if (!(flags & O_EXCL)) {
872 /* ask server to check for all possible rights as results
873 * are cached */
874 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
875 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
876 }
877 p->o_arg.clientid = server->nfs_client->cl_clientid;
878 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
879 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
880 p->o_arg.name = &dentry->d_name;
881 p->o_arg.server = server;
882 p->o_arg.bitmask = server->attr_bitmask;
883 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
884 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
885 if (attrs != NULL && attrs->ia_valid != 0) {
886 __be32 verf[2];
887
888 p->o_arg.u.attrs = &p->attrs;
889 memcpy(&p->attrs, attrs, sizeof(p->attrs));
890
891 verf[0] = jiffies;
892 verf[1] = current->pid;
893 memcpy(p->o_arg.u.verifier.data, verf,
894 sizeof(p->o_arg.u.verifier.data));
895 }
896 p->c_arg.fh = &p->o_res.fh;
897 p->c_arg.stateid = &p->o_res.stateid;
898 p->c_arg.seqid = p->o_arg.seqid;
899 nfs4_init_opendata_res(p);
900 kref_init(&p->kref);
901 return p;
902 err_free:
903 kfree(p);
904 err:
905 dput(parent);
906 return NULL;
907 }
908
909 static void nfs4_opendata_free(struct kref *kref)
910 {
911 struct nfs4_opendata *p = container_of(kref,
912 struct nfs4_opendata, kref);
913 struct super_block *sb = p->dentry->d_sb;
914
915 nfs_free_seqid(p->o_arg.seqid);
916 if (p->state != NULL)
917 nfs4_put_open_state(p->state);
918 nfs4_put_state_owner(p->owner);
919 dput(p->dir);
920 dput(p->dentry);
921 nfs_sb_deactive(sb);
922 nfs_fattr_free_names(&p->f_attr);
923 kfree(p);
924 }
925
926 static void nfs4_opendata_put(struct nfs4_opendata *p)
927 {
928 if (p != NULL)
929 kref_put(&p->kref, nfs4_opendata_free);
930 }
931
932 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
933 {
934 int ret;
935
936 ret = rpc_wait_for_completion_task(task);
937 return ret;
938 }
939
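/*
 * Return non-zero if an existing open stateid already covers the requested
 * open mode, in which case no OPEN RPC is needed. Never true for O_EXCL or
 * O_TRUNC opens, which must always go to the server.
 */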
940 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
941 {
942 int ret = 0;
943
944 if (open_mode & (O_EXCL|O_TRUNC))
945 goto out;
946 switch (mode & (FMODE_READ|FMODE_WRITE)) {
947 case FMODE_READ:
948 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
949 && state->n_rdonly != 0;
950 break;
951 case FMODE_WRITE:
952 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
953 && state->n_wronly != 0;
954 break;
955 case FMODE_READ|FMODE_WRITE:
956 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
957 && state->n_rdwr != 0;
958 }
959 out:
960 return ret;
961 }
962
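/*
 * Return non-zero if the cached delegation covers the requested open mode
 * and is not awaiting reclaim; mark it as referenced so it is not returned
 * prematurely.
 */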
963 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
964 {
965 if (delegation == NULL)
966 return 0;
967 if ((delegation->type & fmode) != fmode)
968 return 0;
969 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
970 return 0;
971 nfs_mark_delegation_referenced(delegation);
972 return 1;
973 }
974
975 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
976 {
977 switch (fmode) {
978 case FMODE_WRITE:
979 state->n_wronly++;
980 break;
981 case FMODE_READ:
982 state->n_rdonly++;
983 break;
984 case FMODE_READ|FMODE_WRITE:
985 state->n_rdwr++;
986 }
987 nfs4_state_set_mode_locked(state, state->state | fmode);
988 }
989
990 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
991 {
992 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
993 nfs4_stateid_copy(&state->stateid, stateid);
994 nfs4_stateid_copy(&state->open_stateid, stateid);
995 switch (fmode) {
996 case FMODE_READ:
997 set_bit(NFS_O_RDONLY_STATE, &state->flags);
998 break;
999 case FMODE_WRITE:
1000 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1001 break;
1002 case FMODE_READ|FMODE_WRITE:
1003 set_bit(NFS_O_RDWR_STATE, &state->flags);
1004 }
1005 }
1006
1007 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1008 {
1009 write_seqlock(&state->seqlock);
1010 nfs_set_open_stateid_locked(state, stateid, fmode);
1011 write_sequnlock(&state->seqlock);
1012 }
1013
1014 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1015 {
1016 /*
1017 * Protect the call to nfs4_state_set_mode_locked and
1018 * serialise the stateid update
1019 */
1020 write_seqlock(&state->seqlock);
1021 if (deleg_stateid != NULL) {
1022 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1023 set_bit(NFS_DELEGATED_STATE, &state->flags);
1024 }
1025 if (open_stateid != NULL)
1026 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1027 write_sequnlock(&state->seqlock);
1028 spin_lock(&state->owner->so_lock);
1029 update_open_stateflags(state, fmode);
1030 spin_unlock(&state->owner->so_lock);
1031 }
1032
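/*
 * Record a new open stateid and/or delegation stateid for the open state.
 * If a matching delegation is held, its stateid is recorded too and the
 * state is marked NFS_DELEGATED_STATE. Returns 1 if the state was updated,
 * 0 if neither a usable delegation nor an open stateid was supplied.
 */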
1033 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1034 {
1035 struct nfs_inode *nfsi = NFS_I(state->inode);
1036 struct nfs_delegation *deleg_cur;
1037 int ret = 0;
1038
1039 fmode &= (FMODE_READ|FMODE_WRITE);
1040
1041 rcu_read_lock();
1042 deleg_cur = rcu_dereference(nfsi->delegation);
1043 if (deleg_cur == NULL)
1044 goto no_delegation;
1045
1046 spin_lock(&deleg_cur->lock);
1047 if (nfsi->delegation != deleg_cur ||
1048 (deleg_cur->type & fmode) != fmode)
1049 goto no_delegation_unlock;
1050
1051 if (delegation == NULL)
1052 delegation = &deleg_cur->stateid;
1053 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1054 goto no_delegation_unlock;
1055
1056 nfs_mark_delegation_referenced(deleg_cur);
1057 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1058 ret = 1;
1059 no_delegation_unlock:
1060 spin_unlock(&deleg_cur->lock);
1061 no_delegation:
1062 rcu_read_unlock();
1063
1064 if (!ret && open_stateid != NULL) {
1065 __update_open_stateid(state, open_stateid, NULL, fmode);
1066 ret = 1;
1067 }
1068
1069 return ret;
1070 }
1071
1072
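/*
 * If we hold a delegation that does not cover the requested open mode,
 * return it to the server before proceeding with the OPEN.
 */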
1073 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1074 {
1075 struct nfs_delegation *delegation;
1076
1077 rcu_read_lock();
1078 delegation = rcu_dereference(NFS_I(inode)->delegation);
1079 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1080 rcu_read_unlock();
1081 return;
1082 }
1083 rcu_read_unlock();
1084 nfs4_inode_return_delegation(inode);
1085 }
1086
1087 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1088 {
1089 struct nfs4_state *state = opendata->state;
1090 struct nfs_inode *nfsi = NFS_I(state->inode);
1091 struct nfs_delegation *delegation;
1092 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1093 fmode_t fmode = opendata->o_arg.fmode;
1094 nfs4_stateid stateid;
1095 int ret = -EAGAIN;
1096
1097 for (;;) {
1098 if (can_open_cached(state, fmode, open_mode)) {
1099 spin_lock(&state->owner->so_lock);
1100 if (can_open_cached(state, fmode, open_mode)) {
1101 update_open_stateflags(state, fmode);
1102 spin_unlock(&state->owner->so_lock);
1103 goto out_return_state;
1104 }
1105 spin_unlock(&state->owner->so_lock);
1106 }
1107 rcu_read_lock();
1108 delegation = rcu_dereference(nfsi->delegation);
1109 if (!can_open_delegated(delegation, fmode)) {
1110 rcu_read_unlock();
1111 break;
1112 }
1113 /* Save the delegation */
1114 nfs4_stateid_copy(&stateid, &delegation->stateid);
1115 rcu_read_unlock();
1116 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1117 if (ret != 0)
1118 goto out;
1119 ret = -EAGAIN;
1120
1121 /* Try to update the stateid using the delegation */
1122 if (update_open_stateid(state, NULL, &stateid, fmode))
1123 goto out_return_state;
1124 }
1125 out:
1126 return ERR_PTR(ret);
1127 out_return_state:
1128 atomic_inc(&state->count);
1129 return state;
1130 }
1131
1132 static void
1133 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1134 {
1135 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1136 struct nfs_delegation *delegation;
1137 int delegation_flags = 0;
1138
1139 rcu_read_lock();
1140 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1141 if (delegation)
1142 delegation_flags = delegation->flags;
1143 rcu_read_unlock();
1144 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1145 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1146 "returning a delegation for "
1147 "OPEN(CLAIM_DELEGATE_CUR)\n",
1148 clp->cl_hostname);
1149 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1150 nfs_inode_set_delegation(state->inode,
1151 data->owner->so_cred,
1152 &data->o_res);
1153 else
1154 nfs_inode_reclaim_delegation(state->inode,
1155 data->owner->so_cred,
1156 &data->o_res);
1157 }
1158
1159 /*
1160 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1161 * and update the nfs4_state.
1162 */
1163 static struct nfs4_state *
1164 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1165 {
1166 struct inode *inode = data->state->inode;
1167 struct nfs4_state *state = data->state;
1168 int ret;
1169
1170 if (!data->rpc_done) {
1171 ret = data->rpc_status;
1172 goto err;
1173 }
1174
1175 ret = -ESTALE;
1176 if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
1177 !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
1178 !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
1179 goto err;
1180
1181 ret = -ENOMEM;
1182 state = nfs4_get_open_state(inode, data->owner);
1183 if (state == NULL)
1184 goto err;
1185
1186 ret = nfs_refresh_inode(inode, &data->f_attr);
1187 if (ret)
1188 goto err;
1189
1190 if (data->o_res.delegation_type != 0)
1191 nfs4_opendata_check_deleg(data, state);
1192 update_open_stateid(state, &data->o_res.stateid, NULL,
1193 data->o_arg.fmode);
1194
1195 return state;
1196 err:
1197 return ERR_PTR(ret);
1198
1199 }
1200
1201 static struct nfs4_state *
1202 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1203 {
1204 struct inode *inode;
1205 struct nfs4_state *state = NULL;
1206 int ret;
1207
1208 if (!data->rpc_done) {
1209 state = nfs4_try_open_cached(data);
1210 goto out;
1211 }
1212
1213 ret = -EAGAIN;
1214 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1215 goto err;
1216 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1217 ret = PTR_ERR(inode);
1218 if (IS_ERR(inode))
1219 goto err;
1220 ret = -ENOMEM;
1221 state = nfs4_get_open_state(inode, data->owner);
1222 if (state == NULL)
1223 goto err_put_inode;
1224 if (data->o_res.delegation_type != 0)
1225 nfs4_opendata_check_deleg(data, state);
1226 update_open_stateid(state, &data->o_res.stateid, NULL,
1227 data->o_arg.fmode);
1228 iput(inode);
1229 out:
1230 return state;
1231 err_put_inode:
1232 iput(inode);
1233 err:
1234 return ERR_PTR(ret);
1235 }
1236
1237 static struct nfs4_state *
1238 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1239 {
1240 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1241 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1242 return _nfs4_opendata_to_nfs4_state(data);
1243 }
1244
1245 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1246 {
1247 struct nfs_inode *nfsi = NFS_I(state->inode);
1248 struct nfs_open_context *ctx;
1249
1250 spin_lock(&state->inode->i_lock);
1251 list_for_each_entry(ctx, &nfsi->open_files, list) {
1252 if (ctx->state != state)
1253 continue;
1254 get_nfs_open_context(ctx);
1255 spin_unlock(&state->inode->i_lock);
1256 return ctx;
1257 }
1258 spin_unlock(&state->inode->i_lock);
1259 return ERR_PTR(-ENOENT);
1260 }
1261
1262 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1263 {
1264 struct nfs4_opendata *opendata;
1265
1266 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1267 if (opendata == NULL)
1268 return ERR_PTR(-ENOMEM);
1269 opendata->state = state;
1270 atomic_inc(&state->count);
1271 return opendata;
1272 }
1273
1274 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1275 {
1276 struct nfs4_state *newstate;
1277 int ret;
1278
1279 opendata->o_arg.open_flags = 0;
1280 opendata->o_arg.fmode = fmode;
1281 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1282 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1283 nfs4_init_opendata_res(opendata);
1284 ret = _nfs4_recover_proc_open(opendata);
1285 if (ret != 0)
1286 return ret;
1287 newstate = nfs4_opendata_to_nfs4_state(opendata);
1288 if (IS_ERR(newstate))
1289 return PTR_ERR(newstate);
1290 nfs4_close_state(newstate, fmode);
1291 *res = newstate;
1292 return 0;
1293 }
1294
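/*
 * Re-open the file for every open mode that still has users (read/write,
 * write-only, read-only), verifying that the server hands back the same
 * nfs4_state each time, then resynchronise the cached stateid with the
 * open stateid if the delegation is gone.
 */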
1295 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1296 {
1297 struct nfs4_state *newstate;
1298 int ret;
1299
1300 /* memory barrier prior to reading state->n_* */
1301 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1302 smp_rmb();
1303 if (state->n_rdwr != 0) {
1304 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1305 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1306 if (ret != 0)
1307 return ret;
1308 if (newstate != state)
1309 return -ESTALE;
1310 }
1311 if (state->n_wronly != 0) {
1312 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1313 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1314 if (ret != 0)
1315 return ret;
1316 if (newstate != state)
1317 return -ESTALE;
1318 }
1319 if (state->n_rdonly != 0) {
1320 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1321 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1322 if (ret != 0)
1323 return ret;
1324 if (newstate != state)
1325 return -ESTALE;
1326 }
1327 /*
1328 * We may have performed cached opens for all three recoveries.
1329 * Check if we need to update the current stateid.
1330 */
1331 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1332 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1333 write_seqlock(&state->seqlock);
1334 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1335 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1336 write_sequnlock(&state->seqlock);
1337 }
1338 return 0;
1339 }
1340
1341 /*
1342 * OPEN_RECLAIM:
1343 * reclaim state on the server after a reboot.
1344 */
1345 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1346 {
1347 struct nfs_delegation *delegation;
1348 struct nfs4_opendata *opendata;
1349 fmode_t delegation_type = 0;
1350 int status;
1351
1352 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1353 if (IS_ERR(opendata))
1354 return PTR_ERR(opendata);
1355 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1356 opendata->o_arg.fh = NFS_FH(state->inode);
1357 rcu_read_lock();
1358 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1359 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1360 delegation_type = delegation->type;
1361 rcu_read_unlock();
1362 opendata->o_arg.u.delegation_type = delegation_type;
1363 status = nfs4_open_recover(opendata, state);
1364 nfs4_opendata_put(opendata);
1365 return status;
1366 }
1367
1368 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1369 {
1370 struct nfs_server *server = NFS_SERVER(state->inode);
1371 struct nfs4_exception exception = { };
1372 int err;
1373 do {
1374 err = _nfs4_do_open_reclaim(ctx, state);
1375 if (err != -NFS4ERR_DELAY)
1376 break;
1377 nfs4_handle_exception(server, err, &exception);
1378 } while (exception.retry);
1379 return err;
1380 }
1381
1382 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1383 {
1384 struct nfs_open_context *ctx;
1385 int ret;
1386
1387 ctx = nfs4_state_find_open_context(state);
1388 if (IS_ERR(ctx))
1389 return PTR_ERR(ctx);
1390 ret = nfs4_do_open_reclaim(ctx, state);
1391 put_nfs_open_context(ctx);
1392 return ret;
1393 }
1394
1395 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1396 {
1397 struct nfs4_opendata *opendata;
1398 int ret;
1399
1400 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1401 if (IS_ERR(opendata))
1402 return PTR_ERR(opendata);
1403 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1404 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1405 ret = nfs4_open_recover(opendata, state);
1406 nfs4_opendata_put(opendata);
1407 return ret;
1408 }
1409
1410 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1411 {
1412 struct nfs4_exception exception = { };
1413 struct nfs_server *server = NFS_SERVER(state->inode);
1414 int err;
1415 do {
1416 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1417 switch (err) {
1418 case 0:
1419 case -ENOENT:
1420 case -ESTALE:
1421 goto out;
1422 case -NFS4ERR_BADSESSION:
1423 case -NFS4ERR_BADSLOT:
1424 case -NFS4ERR_BAD_HIGH_SLOT:
1425 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1426 case -NFS4ERR_DEADSESSION:
1427 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1428 goto out;
1429 case -NFS4ERR_STALE_CLIENTID:
1430 case -NFS4ERR_STALE_STATEID:
1431 case -NFS4ERR_EXPIRED:
1432 /* Don't recall a delegation if it was lost */
1433 nfs4_schedule_lease_recovery(server->nfs_client);
1434 goto out;
1435 case -ERESTARTSYS:
1436 /*
1437 * The show must go on: exit, but mark the
1438 * stateid as needing recovery.
1439 */
1440 case -NFS4ERR_DELEG_REVOKED:
1441 case -NFS4ERR_ADMIN_REVOKED:
1442 case -NFS4ERR_BAD_STATEID:
1443 nfs_inode_find_state_and_recover(state->inode,
1444 stateid);
1445 nfs4_schedule_stateid_recovery(server, state);
1446 case -EKEYEXPIRED:
1447 /*
1448 * User RPCSEC_GSS context has expired.
1449 * We cannot recover this stateid now, so
1450 * skip it and allow recovery thread to
1451 * proceed.
1452 */
1453 case -ENOMEM:
1454 err = 0;
1455 goto out;
1456 }
1457 err = nfs4_handle_exception(server, err, &exception);
1458 } while (exception.retry);
1459 out:
1460 return err;
1461 }
1462
1463 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1464 {
1465 struct nfs4_opendata *data = calldata;
1466
1467 data->rpc_status = task->tk_status;
1468 if (data->rpc_status == 0) {
1469 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1470 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1471 renew_lease(data->o_res.server, data->timestamp);
1472 data->rpc_done = 1;
1473 }
1474 }
1475
1476 static void nfs4_open_confirm_release(void *calldata)
1477 {
1478 struct nfs4_opendata *data = calldata;
1479 struct nfs4_state *state = NULL;
1480
1481 /* If this request hasn't been cancelled, do nothing */
1482 if (data->cancelled == 0)
1483 goto out_free;
1484 /* In case of error, no cleanup! */
1485 if (!data->rpc_done)
1486 goto out_free;
1487 state = nfs4_opendata_to_nfs4_state(data);
1488 if (!IS_ERR(state))
1489 nfs4_close_state(state, data->o_arg.fmode);
1490 out_free:
1491 nfs4_opendata_put(data);
1492 }
1493
1494 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1495 .rpc_call_done = nfs4_open_confirm_done,
1496 .rpc_release = nfs4_open_confirm_release,
1497 };
1498
1499 /*
1500 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1501 */
1502 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1503 {
1504 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1505 struct rpc_task *task;
1506 struct rpc_message msg = {
1507 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1508 .rpc_argp = &data->c_arg,
1509 .rpc_resp = &data->c_res,
1510 .rpc_cred = data->owner->so_cred,
1511 };
1512 struct rpc_task_setup task_setup_data = {
1513 .rpc_client = server->client,
1514 .rpc_message = &msg,
1515 .callback_ops = &nfs4_open_confirm_ops,
1516 .callback_data = data,
1517 .workqueue = nfsiod_workqueue,
1518 .flags = RPC_TASK_ASYNC,
1519 };
1520 int status;
1521
1522 kref_get(&data->kref);
1523 data->rpc_done = 0;
1524 data->rpc_status = 0;
1525 data->timestamp = jiffies;
1526 task = rpc_run_task(&task_setup_data);
1527 if (IS_ERR(task))
1528 return PTR_ERR(task);
1529 status = nfs4_wait_for_completion_rpc_task(task);
1530 if (status != 0) {
1531 data->cancelled = 1;
1532 smp_wmb();
1533 } else
1534 status = data->rpc_status;
1535 rpc_put_task(task);
1536 return status;
1537 }
1538
1539 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1540 {
1541 struct nfs4_opendata *data = calldata;
1542 struct nfs4_state_owner *sp = data->owner;
1543
1544 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1545 return;
1546 /*
1547 * Check if we still need to send an OPEN call, or if we can use
1548 * a delegation instead.
1549 */
1550 if (data->state != NULL) {
1551 struct nfs_delegation *delegation;
1552
1553 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1554 goto out_no_action;
1555 rcu_read_lock();
1556 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1557 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1558 can_open_delegated(delegation, data->o_arg.fmode))
1559 goto unlock_no_action;
1560 rcu_read_unlock();
1561 }
1562 /* Update client id. */
1563 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1564 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1565 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1566 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
1567 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1568 }
1569 data->timestamp = jiffies;
1570 if (nfs4_setup_sequence(data->o_arg.server,
1571 &data->o_arg.seq_args,
1572 &data->o_res.seq_res,
1573 task) != 0)
1574 nfs_release_seqid(data->o_arg.seqid);
1575 else
1576 rpc_call_start(task);
1577 return;
1578 unlock_no_action:
1579 rcu_read_unlock();
1580 out_no_action:
1581 task->tk_action = NULL;
1582
1583 }
1584
1585 static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1586 {
1587 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1588 nfs4_open_prepare(task, calldata);
1589 }
1590
1591 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1592 {
1593 struct nfs4_opendata *data = calldata;
1594
1595 data->rpc_status = task->tk_status;
1596
1597 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1598 return;
1599
1600 if (task->tk_status == 0) {
1601 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1602 switch (data->o_res.f_attr->mode & S_IFMT) {
1603 case S_IFREG:
1604 break;
1605 case S_IFLNK:
1606 data->rpc_status = -ELOOP;
1607 break;
1608 case S_IFDIR:
1609 data->rpc_status = -EISDIR;
1610 break;
1611 default:
1612 data->rpc_status = -ENOTDIR;
1613 }
1614 }
1615 renew_lease(data->o_res.server, data->timestamp);
1616 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1617 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1618 }
1619 data->rpc_done = 1;
1620 }
1621
1622 static void nfs4_open_release(void *calldata)
1623 {
1624 struct nfs4_opendata *data = calldata;
1625 struct nfs4_state *state = NULL;
1626
1627 /* If this request hasn't been cancelled, do nothing */
1628 if (data->cancelled == 0)
1629 goto out_free;
1630 /* In case of error, no cleanup! */
1631 if (data->rpc_status != 0 || !data->rpc_done)
1632 goto out_free;
1633 /* In case we need an open_confirm, no cleanup! */
1634 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1635 goto out_free;
1636 state = nfs4_opendata_to_nfs4_state(data);
1637 if (!IS_ERR(state))
1638 nfs4_close_state(state, data->o_arg.fmode);
1639 out_free:
1640 nfs4_opendata_put(data);
1641 }
1642
1643 static const struct rpc_call_ops nfs4_open_ops = {
1644 .rpc_call_prepare = nfs4_open_prepare,
1645 .rpc_call_done = nfs4_open_done,
1646 .rpc_release = nfs4_open_release,
1647 };
1648
1649 static const struct rpc_call_ops nfs4_recover_open_ops = {
1650 .rpc_call_prepare = nfs4_recover_open_prepare,
1651 .rpc_call_done = nfs4_open_done,
1652 .rpc_release = nfs4_open_release,
1653 };
1654
1655 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1656 {
1657 struct inode *dir = data->dir->d_inode;
1658 struct nfs_server *server = NFS_SERVER(dir);
1659 struct nfs_openargs *o_arg = &data->o_arg;
1660 struct nfs_openres *o_res = &data->o_res;
1661 struct rpc_task *task;
1662 struct rpc_message msg = {
1663 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1664 .rpc_argp = o_arg,
1665 .rpc_resp = o_res,
1666 .rpc_cred = data->owner->so_cred,
1667 };
1668 struct rpc_task_setup task_setup_data = {
1669 .rpc_client = server->client,
1670 .rpc_message = &msg,
1671 .callback_ops = &nfs4_open_ops,
1672 .callback_data = data,
1673 .workqueue = nfsiod_workqueue,
1674 .flags = RPC_TASK_ASYNC,
1675 };
1676 int status;
1677
1678 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1679 kref_get(&data->kref);
1680 data->rpc_done = 0;
1681 data->rpc_status = 0;
1682 data->cancelled = 0;
1683 if (isrecover)
1684 task_setup_data.callback_ops = &nfs4_recover_open_ops;
1685 task = rpc_run_task(&task_setup_data);
1686 if (IS_ERR(task))
1687 return PTR_ERR(task);
1688 status = nfs4_wait_for_completion_rpc_task(task);
1689 if (status != 0) {
1690 data->cancelled = 1;
1691 smp_wmb();
1692 } else
1693 status = data->rpc_status;
1694 rpc_put_task(task);
1695
1696 return status;
1697 }
1698
1699 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1700 {
1701 struct inode *dir = data->dir->d_inode;
1702 struct nfs_openres *o_res = &data->o_res;
1703 int status;
1704
1705 status = nfs4_run_open_task(data, 1);
1706 if (status != 0 || !data->rpc_done)
1707 return status;
1708
1709 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1710
1711 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1712 status = _nfs4_proc_open_confirm(data);
1713 if (status != 0)
1714 return status;
1715 }
1716
1717 return status;
1718 }
1719
1720 static int nfs4_opendata_access(struct rpc_cred *cred,
1721 struct nfs4_opendata *opendata,
1722 struct nfs4_state *state, fmode_t fmode)
1723 {
1724 struct nfs_access_entry cache;
1725 u32 mask;
1726
1727 /* access call failed or for some reason the server doesn't
1728 * support any access modes -- defer access call until later */
1729 if (opendata->o_res.access_supported == 0)
1730 return 0;
1731
1732 mask = 0;
1733 /* don't check MAY_WRITE - a newly created file may not have
1734 * write mode bits, but POSIX allows the creating process to write */
1735 if (fmode & FMODE_READ)
1736 mask |= MAY_READ;
1737 if (fmode & FMODE_EXEC)
1738 mask |= MAY_EXEC;
1739
1740 cache.cred = cred;
1741 cache.jiffies = jiffies;
1742 nfs_access_set_mask(&cache, opendata->o_res.access_result);
1743 nfs_access_add_cache(state->inode, &cache);
1744
1745 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
1746 return 0;
1747
1748 /* even though OPEN succeeded, access is denied. Close the file */
1749 nfs4_close_state(state, fmode);
1750 return -EACCES;
1751 }
1752
1753 /*
1754 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1755 */
1756 static int _nfs4_proc_open(struct nfs4_opendata *data)
1757 {
1758 struct inode *dir = data->dir->d_inode;
1759 struct nfs_server *server = NFS_SERVER(dir);
1760 struct nfs_openargs *o_arg = &data->o_arg;
1761 struct nfs_openres *o_res = &data->o_res;
1762 int status;
1763
1764 status = nfs4_run_open_task(data, 0);
1765 if (!data->rpc_done)
1766 return status;
1767 if (status != 0) {
1768 if (status == -NFS4ERR_BADNAME &&
1769 !(o_arg->open_flags & O_CREAT))
1770 return -ENOENT;
1771 return status;
1772 }
1773
1774 nfs_fattr_map_and_free_names(server, &data->f_attr);
1775
1776 if (o_arg->open_flags & O_CREAT)
1777 update_changeattr(dir, &o_res->cinfo);
1778 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1779 server->caps &= ~NFS_CAP_POSIX_LOCK;
1780 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1781 status = _nfs4_proc_open_confirm(data);
1782 if (status != 0)
1783 return status;
1784 }
1785 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1786 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1787 return 0;
1788 }
1789
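/*
 * Wait for the lease to be re-established, kicking the state manager as
 * needed. Gives up with -EIO after NFS4_MAX_LOOP_ON_RECOVER attempts.
 */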
1790 static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1791 {
1792 unsigned int loop;
1793 int ret;
1794
1795 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1796 ret = nfs4_wait_clnt_recover(clp);
1797 if (ret != 0)
1798 break;
1799 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1800 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1801 break;
1802 nfs4_schedule_state_manager(clp);
1803 ret = -EIO;
1804 }
1805 return ret;
1806 }
1807
1808 static int nfs4_recover_expired_lease(struct nfs_server *server)
1809 {
1810 return nfs4_client_recover_expired_lease(server->nfs_client);
1811 }
1812
1813 /*
1814 * OPEN_EXPIRED:
1815 * reclaim state on the server after a network partition.
1816 * Assumes caller holds the appropriate lock
1817 */
1818 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1819 {
1820 struct nfs4_opendata *opendata;
1821 int ret;
1822
1823 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1824 if (IS_ERR(opendata))
1825 return PTR_ERR(opendata);
1826 ret = nfs4_open_recover(opendata, state);
1827 if (ret == -ESTALE)
1828 d_drop(ctx->dentry);
1829 nfs4_opendata_put(opendata);
1830 return ret;
1831 }
1832
1833 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1834 {
1835 struct nfs_server *server = NFS_SERVER(state->inode);
1836 struct nfs4_exception exception = { };
1837 int err;
1838
1839 do {
1840 err = _nfs4_open_expired(ctx, state);
1841 switch (err) {
1842 default:
1843 goto out;
1844 case -NFS4ERR_GRACE:
1845 case -NFS4ERR_DELAY:
1846 nfs4_handle_exception(server, err, &exception);
1847 err = 0;
1848 }
1849 } while (exception.retry);
1850 out:
1851 return err;
1852 }
1853
1854 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1855 {
1856 struct nfs_open_context *ctx;
1857 int ret;
1858
1859 ctx = nfs4_state_find_open_context(state);
1860 if (IS_ERR(ctx))
1861 return PTR_ERR(ctx);
1862 ret = nfs4_do_open_expired(ctx, state);
1863 put_nfs_open_context(ctx);
1864 return ret;
1865 }
1866
1867 #if defined(CONFIG_NFS_V4_1)
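/**
 * nfs41_clear_delegation_stateid - sanity-check a delegation stateid
 *
 * @state: NFSv4 state whose delegation stateid should be checked
 *
 * Uses TEST_STATEID to ask the server whether the delegation stateid is
 * still known.  If it is not, the stateid is freed (unless the server
 * already reported it as bad), the delegation is dropped, and the open
 * stateid is reinstated as the current stateid for this state.
 */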
1868 static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
1869 {
1870 struct nfs_server *server = NFS_SERVER(state->inode);
1871 nfs4_stateid *stateid = &state->stateid;
1872 int status;
1873
1874 /* If a state reset has been done, test_stateid is unneeded */
1875 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1876 return;
1877
1878 status = nfs41_test_stateid(server, stateid);
1879 if (status != NFS_OK) {
1880 /* Free the stateid unless the server explicitly
1881 * informs us the stateid is unrecognized. */
1882 if (status != -NFS4ERR_BAD_STATEID)
1883 nfs41_free_stateid(server, stateid);
1884 nfs_remove_bad_delegation(state->inode);
1885
1886 write_seqlock(&state->seqlock);
1887 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1888 write_sequnlock(&state->seqlock);
1889 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1890 }
1891 }
1892
1893 /**
1894 * nfs41_check_open_stateid - possibly free an open stateid
1895 *
1896 * @state: NFSv4 state for an inode
1897 *
1898 * Returns NFS_OK if recovery for this stateid is now finished.
1899 * Otherwise a negative NFS4ERR value is returned.
1900 */
1901 static int nfs41_check_open_stateid(struct nfs4_state *state)
1902 {
1903 struct nfs_server *server = NFS_SERVER(state->inode);
1904 nfs4_stateid *stateid = &state->open_stateid;
1905 int status;
1906
1907 /* If a state reset has been done, test_stateid is unneeded */
1908 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
1909 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
1910 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
1911 return -NFS4ERR_BAD_STATEID;
1912
1913 status = nfs41_test_stateid(server, stateid);
1914 if (status != NFS_OK) {
1915 /* Free the stateid unless the server explicitly
1916 * informs us the stateid is unrecognized. */
1917 if (status != -NFS4ERR_BAD_STATEID)
1918 nfs41_free_stateid(server, stateid);
1919
1920 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1921 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1922 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1923 }
1924 return status;
1925 }
1926
1927 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1928 {
1929 int status;
1930
1931 nfs41_clear_delegation_stateid(state);
1932 status = nfs41_check_open_stateid(state);
1933 if (status != NFS_OK)
1934 status = nfs4_open_expired(sp, state);
1935 return status;
1936 }
1937 #endif
1938
1939 /*
1940 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1941 * fields corresponding to attributes that were used to store the verifier.
1942 * Make sure we clobber those fields in the later setattr call
1943 */
1944 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1945 {
1946 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1947 !(sattr->ia_valid & ATTR_ATIME_SET))
1948 sattr->ia_valid |= ATTR_ATIME;
1949
1950 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1951 !(sattr->ia_valid & ATTR_MTIME_SET))
1952 sattr->ia_valid |= ATTR_MTIME;
1953 }
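/*
 * Example: if the server stored the exclusive-create verifier in the
 * mtime, o_res.attrset[1] will have FATTR4_WORD1_TIME_MODIFY set, so
 * unless the caller explicitly requested an mtime we add ATTR_MTIME
 * here and the follow-up SETATTR issued from _nfs4_do_open() replaces
 * the verifier with a real timestamp.
 */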
1954
1955 /*
1956 * Returns a referenced nfs4_state
1957 */
1958 static int _nfs4_do_open(struct inode *dir,
1959 struct dentry *dentry,
1960 fmode_t fmode,
1961 int flags,
1962 struct iattr *sattr,
1963 struct rpc_cred *cred,
1964 struct nfs4_state **res,
1965 struct nfs4_threshold **ctx_th)
1966 {
1967 struct nfs4_state_owner *sp;
1968 struct nfs4_state *state = NULL;
1969 struct nfs_server *server = NFS_SERVER(dir);
1970 struct nfs4_opendata *opendata;
1971 int status;
1972
1973 /* Protect against reboot recovery conflicts */
1974 status = -ENOMEM;
1975 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1976 if (sp == NULL) {
1977 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1978 goto out_err;
1979 }
1980 status = nfs4_recover_expired_lease(server);
1981 if (status != 0)
1982 goto err_put_state_owner;
1983 if (dentry->d_inode != NULL)
1984 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1985 status = -ENOMEM;
1986 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1987 if (opendata == NULL)
1988 goto err_put_state_owner;
1989
1990 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
1991 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1992 if (!opendata->f_attr.mdsthreshold)
1993 goto err_opendata_put;
1994 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
1995 }
1996 if (dentry->d_inode != NULL)
1997 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1998
1999 status = _nfs4_proc_open(opendata);
2000 if (status != 0)
2001 goto err_opendata_put;
2002
2003 state = nfs4_opendata_to_nfs4_state(opendata);
2004 status = PTR_ERR(state);
2005 if (IS_ERR(state))
2006 goto err_opendata_put;
2007 if (server->caps & NFS_CAP_POSIX_LOCK)
2008 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2009
2010 status = nfs4_opendata_access(cred, opendata, state, fmode);
2011 if (status != 0)
2012 goto err_opendata_put;
2013
2014 if (opendata->o_arg.open_flags & O_EXCL) {
2015 nfs4_exclusive_attrset(opendata, sattr);
2016
2017 nfs_fattr_init(opendata->o_res.f_attr);
2018 status = nfs4_do_setattr(state->inode, cred,
2019 opendata->o_res.f_attr, sattr,
2020 state);
2021 if (status == 0)
2022 nfs_setattr_update_inode(state->inode, sattr);
2023 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
2024 }
2025
2026 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
2027 *ctx_th = opendata->f_attr.mdsthreshold;
2028 else
2029 kfree(opendata->f_attr.mdsthreshold);
2030 opendata->f_attr.mdsthreshold = NULL;
2031
2032 nfs4_opendata_put(opendata);
2033 nfs4_put_state_owner(sp);
2034 *res = state;
2035 return 0;
2036 err_opendata_put:
2037 kfree(opendata->f_attr.mdsthreshold);
2038 nfs4_opendata_put(opendata);
2039 err_put_state_owner:
2040 nfs4_put_state_owner(sp);
2041 out_err:
2042 *res = NULL;
2043 return status;
2044 }
2045
2046
2047 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2048 struct dentry *dentry,
2049 fmode_t fmode,
2050 int flags,
2051 struct iattr *sattr,
2052 struct rpc_cred *cred,
2053 struct nfs4_threshold **ctx_th)
2054 {
2055 struct nfs4_exception exception = { };
2056 struct nfs4_state *res;
2057 int status;
2058
2059 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC;
2060 do {
2061 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
2062 &res, ctx_th);
2063 if (status == 0)
2064 break;
2065 /* NOTE: BAD_SEQID means the server and client disagree about the
2066 * book-keeping w.r.t. state-changing operations
2067 * (OPEN/CLOSE/LOCK/LOCKU...)
2068 * It is actually a sign of a bug on the client or on the server.
2069 *
2070 * If we receive a BAD_SEQID error in the particular case of
2071 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2072 * have unhashed the old state_owner for us, and that we can
2073 * therefore safely retry using a new one. We should still warn
2074 * the user though...
2075 */
2076 if (status == -NFS4ERR_BAD_SEQID) {
2077 pr_warn_ratelimited("NFS: v4 server %s "
2078 "returned a bad sequence-id error!\n",
2079 NFS_SERVER(dir)->nfs_client->cl_hostname);
2080 exception.retry = 1;
2081 continue;
2082 }
2083 /*
2084 * BAD_STATEID on OPEN means that the server cancelled our
2085 * state before it received the OPEN_CONFIRM.
2086 * Recover by retrying the request as per the discussion
2087 * on Page 181 of RFC3530.
2088 */
2089 if (status == -NFS4ERR_BAD_STATEID) {
2090 exception.retry = 1;
2091 continue;
2092 }
2093 if (status == -EAGAIN) {
2094 /* We must have found a delegation */
2095 exception.retry = 1;
2096 continue;
2097 }
2098 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
2099 status, &exception));
2100 } while (exception.retry);
2101 return res;
2102 }
2103
2104 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2105 struct nfs_fattr *fattr, struct iattr *sattr,
2106 struct nfs4_state *state)
2107 {
2108 struct nfs_server *server = NFS_SERVER(inode);
2109 struct nfs_setattrargs arg = {
2110 .fh = NFS_FH(inode),
2111 .iap = sattr,
2112 .server = server,
2113 .bitmask = server->attr_bitmask,
2114 };
2115 struct nfs_setattrres res = {
2116 .fattr = fattr,
2117 .server = server,
2118 };
2119 struct rpc_message msg = {
2120 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2121 .rpc_argp = &arg,
2122 .rpc_resp = &res,
2123 .rpc_cred = cred,
2124 };
2125 unsigned long timestamp = jiffies;
2126 int status;
2127
2128 nfs_fattr_init(fattr);
2129
2130 if (state != NULL) {
2131 struct nfs_lockowner lockowner = {
2132 .l_owner = current->files,
2133 .l_pid = current->tgid,
2134 };
2135 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2136 &lockowner);
2137 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
2138 FMODE_WRITE)) {
2139 /* Use that stateid */
2140 } else
2141 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2142
2143 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2144 if (status == 0 && state != NULL)
2145 renew_lease(server, timestamp);
2146 return status;
2147 }
2148
2149 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2150 struct nfs_fattr *fattr, struct iattr *sattr,
2151 struct nfs4_state *state)
2152 {
2153 struct nfs_server *server = NFS_SERVER(inode);
2154 struct nfs4_exception exception = {
2155 .state = state,
2156 .inode = inode,
2157 };
2158 int err;
2159 do {
2160 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
2161 switch (err) {
2162 case -NFS4ERR_OPENMODE:
2163 if (state && !(state->state & FMODE_WRITE)) {
2164 err = -EBADF;
2165 if (sattr->ia_valid & ATTR_OPEN)
2166 err = -EACCES;
2167 goto out;
2168 }
2169 }
2170 err = nfs4_handle_exception(server, err, &exception);
2171 } while (exception.retry);
2172 out:
2173 return err;
2174 }
2175
2176 struct nfs4_closedata {
2177 struct inode *inode;
2178 struct nfs4_state *state;
2179 struct nfs_closeargs arg;
2180 struct nfs_closeres res;
2181 struct nfs_fattr fattr;
2182 unsigned long timestamp;
2183 bool roc;
2184 u32 roc_barrier;
2185 };
2186
2187 static void nfs4_free_closedata(void *data)
2188 {
2189 struct nfs4_closedata *calldata = data;
2190 struct nfs4_state_owner *sp = calldata->state->owner;
2191 struct super_block *sb = calldata->state->inode->i_sb;
2192
2193 if (calldata->roc)
2194 pnfs_roc_release(calldata->state->inode);
2195 nfs4_put_open_state(calldata->state);
2196 nfs_free_seqid(calldata->arg.seqid);
2197 nfs4_put_state_owner(sp);
2198 nfs_sb_deactive_async(sb);
2199 kfree(calldata);
2200 }
2201
2202 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2203 fmode_t fmode)
2204 {
2205 spin_lock(&state->owner->so_lock);
2206 if (!(fmode & FMODE_READ))
2207 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2208 if (!(fmode & FMODE_WRITE))
2209 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2210 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2211 spin_unlock(&state->owner->so_lock);
2212 }
2213
2214 static void nfs4_close_done(struct rpc_task *task, void *data)
2215 {
2216 struct nfs4_closedata *calldata = data;
2217 struct nfs4_state *state = calldata->state;
2218 struct nfs_server *server = NFS_SERVER(calldata->inode);
2219
2220 dprintk("%s: begin!\n", __func__);
2221 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2222 return;
2223 /* hmm. we are done with the inode, and in the process of freeing
2224 * the state_owner. we keep this around to process errors
2225 */
2226 switch (task->tk_status) {
2227 case 0:
2228 if (calldata->roc)
2229 pnfs_roc_set_barrier(state->inode,
2230 calldata->roc_barrier);
2231 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2232 renew_lease(server, calldata->timestamp);
2233 nfs4_close_clear_stateid_flags(state,
2234 calldata->arg.fmode);
2235 break;
2236 case -NFS4ERR_STALE_STATEID:
2237 case -NFS4ERR_OLD_STATEID:
2238 case -NFS4ERR_BAD_STATEID:
2239 case -NFS4ERR_EXPIRED:
2240 if (calldata->arg.fmode == 0)
2241 break;
2242 default:
2243 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2244 rpc_restart_call_prepare(task);
2245 }
2246 nfs_release_seqid(calldata->arg.seqid);
2247 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2248 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2249 }
2250
2251 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2252 {
2253 struct nfs4_closedata *calldata = data;
2254 struct nfs4_state *state = calldata->state;
2255 struct inode *inode = calldata->inode;
2256 int call_close = 0;
2257
2258 dprintk("%s: begin!\n", __func__);
2259 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2260 return;
2261
2262 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2263 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2264 spin_lock(&state->owner->so_lock);
2265 /* Calculate the change in open mode */
2266 if (state->n_rdwr == 0) {
2267 if (state->n_rdonly == 0) {
2268 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2269 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2270 calldata->arg.fmode &= ~FMODE_READ;
2271 }
2272 if (state->n_wronly == 0) {
2273 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2274 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2275 calldata->arg.fmode &= ~FMODE_WRITE;
2276 }
2277 }
2278 spin_unlock(&state->owner->so_lock);
2279
2280 if (!call_close) {
2281 /* Note: exit _without_ calling nfs4_close_done */
2282 task->tk_action = NULL;
2283 goto out;
2284 }
2285
2286 if (calldata->arg.fmode == 0) {
2287 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2288 if (calldata->roc &&
2289 pnfs_roc_drain(inode, &calldata->roc_barrier, task))
2290 goto out;
2291 }
2292
2293 nfs_fattr_init(calldata->res.fattr);
2294 calldata->timestamp = jiffies;
2295 if (nfs4_setup_sequence(NFS_SERVER(inode),
2296 &calldata->arg.seq_args,
2297 &calldata->res.seq_res,
2298 task) != 0)
2299 nfs_release_seqid(calldata->arg.seqid);
2300 else
2301 rpc_call_start(task);
2302 out:
2303 dprintk("%s: done!\n", __func__);
2304 }
2305
2306 static const struct rpc_call_ops nfs4_close_ops = {
2307 .rpc_call_prepare = nfs4_close_prepare,
2308 .rpc_call_done = nfs4_close_done,
2309 .rpc_release = nfs4_free_closedata,
2310 };
2311
2312 /*
2313 * It is possible for data to be read/written from a mem-mapped file
2314 * after the sys_close call (which hits the vfs layer as a flush).
2315 * This means that we can't safely call nfsv4 close on a file until
2316 * the inode is cleared. This in turn means that we are not good
2317 * NFSv4 citizens - we do not indicate to the server to update the file's
2318 * share state even when we are done with one of the three share
2319 * stateids in the inode.
2320 *
2321 * NOTE: Caller must be holding the sp->so_owner semaphore!
2322 */
2323 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2324 {
2325 struct nfs_server *server = NFS_SERVER(state->inode);
2326 struct nfs4_closedata *calldata;
2327 struct nfs4_state_owner *sp = state->owner;
2328 struct rpc_task *task;
2329 struct rpc_message msg = {
2330 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2331 .rpc_cred = state->owner->so_cred,
2332 };
2333 struct rpc_task_setup task_setup_data = {
2334 .rpc_client = server->client,
2335 .rpc_message = &msg,
2336 .callback_ops = &nfs4_close_ops,
2337 .workqueue = nfsiod_workqueue,
2338 .flags = RPC_TASK_ASYNC,
2339 };
2340 int status = -ENOMEM;
2341
2342 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2343 if (calldata == NULL)
2344 goto out;
2345 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2346 calldata->inode = state->inode;
2347 calldata->state = state;
2348 calldata->arg.fh = NFS_FH(state->inode);
2349 calldata->arg.stateid = &state->open_stateid;
2350 /* Serialization for the sequence id */
2351 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2352 if (calldata->arg.seqid == NULL)
2353 goto out_free_calldata;
2354 calldata->arg.fmode = 0;
2355 calldata->arg.bitmask = server->cache_consistency_bitmask;
2356 calldata->res.fattr = &calldata->fattr;
2357 calldata->res.seqid = calldata->arg.seqid;
2358 calldata->res.server = server;
2359 calldata->roc = pnfs_roc(state->inode);
2360 nfs_sb_active(calldata->inode->i_sb);
2361
2362 msg.rpc_argp = &calldata->arg;
2363 msg.rpc_resp = &calldata->res;
2364 task_setup_data.callback_data = calldata;
2365 task = rpc_run_task(&task_setup_data);
2366 if (IS_ERR(task))
2367 return PTR_ERR(task);
2368 status = 0;
2369 if (wait)
2370 status = rpc_wait_for_completion_task(task);
2371 rpc_put_task(task);
2372 return status;
2373 out_free_calldata:
2374 kfree(calldata);
2375 out:
2376 nfs4_put_open_state(state);
2377 nfs4_put_state_owner(sp);
2378 return status;
2379 }
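/*
 * Call-path sketch: nfs4_close_state()/nfs4_close_sync() funnel into
 * nfs4_do_close() once the last user of an open mode goes away; the
 * actual choice between CLOSE and OPEN_DOWNGRADE is made later, in
 * nfs4_close_prepare(), from the remaining n_rdonly/n_wronly/n_rdwr
 * counts.
 */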
2380
2381 static struct inode *
2382 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2383 {
2384 struct nfs4_state *state;
2385
2386 /* Protect against concurrent sillydeletes */
2387 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2388 ctx->cred, &ctx->mdsthreshold);
2389 if (IS_ERR(state))
2390 return ERR_CAST(state);
2391 ctx->state = state;
2392 return igrab(state->inode);
2393 }
2394
2395 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2396 {
2397 if (ctx->state == NULL)
2398 return;
2399 if (is_sync)
2400 nfs4_close_sync(ctx->state, ctx->mode);
2401 else
2402 nfs4_close_state(ctx->state, ctx->mode);
2403 }
2404
2405 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2406 {
2407 struct nfs4_server_caps_arg args = {
2408 .fhandle = fhandle,
2409 };
2410 struct nfs4_server_caps_res res = {};
2411 struct rpc_message msg = {
2412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2413 .rpc_argp = &args,
2414 .rpc_resp = &res,
2415 };
2416 int status;
2417
2418 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2419 if (status == 0) {
2420 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2421 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2422 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2423 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2424 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2425 NFS_CAP_CTIME|NFS_CAP_MTIME);
2426 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2427 server->caps |= NFS_CAP_ACLS;
2428 if (res.has_links != 0)
2429 server->caps |= NFS_CAP_HARDLINKS;
2430 if (res.has_symlinks != 0)
2431 server->caps |= NFS_CAP_SYMLINKS;
2432 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2433 server->caps |= NFS_CAP_FILEID;
2434 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2435 server->caps |= NFS_CAP_MODE;
2436 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2437 server->caps |= NFS_CAP_NLINK;
2438 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2439 server->caps |= NFS_CAP_OWNER;
2440 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2441 server->caps |= NFS_CAP_OWNER_GROUP;
2442 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2443 server->caps |= NFS_CAP_ATIME;
2444 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2445 server->caps |= NFS_CAP_CTIME;
2446 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2447 server->caps |= NFS_CAP_MTIME;
2448
2449 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2450 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2451 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2452 server->acl_bitmask = res.acl_bitmask;
2453 server->fh_expire_type = res.fh_expire_type;
2454 }
2455
2456 return status;
2457 }
2458
2459 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2460 {
2461 struct nfs4_exception exception = { };
2462 int err;
2463 do {
2464 err = nfs4_handle_exception(server,
2465 _nfs4_server_capabilities(server, fhandle),
2466 &exception);
2467 } while (exception.retry);
2468 return err;
2469 }
2470
2471 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2472 struct nfs_fsinfo *info)
2473 {
2474 struct nfs4_lookup_root_arg args = {
2475 .bitmask = nfs4_fattr_bitmap,
2476 };
2477 struct nfs4_lookup_res res = {
2478 .server = server,
2479 .fattr = info->fattr,
2480 .fh = fhandle,
2481 };
2482 struct rpc_message msg = {
2483 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2484 .rpc_argp = &args,
2485 .rpc_resp = &res,
2486 };
2487
2488 nfs_fattr_init(info->fattr);
2489 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2490 }
2491
2492 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2493 struct nfs_fsinfo *info)
2494 {
2495 struct nfs4_exception exception = { };
2496 int err;
2497 do {
2498 err = _nfs4_lookup_root(server, fhandle, info);
2499 switch (err) {
2500 case 0:
2501 case -NFS4ERR_WRONGSEC:
2502 goto out;
2503 default:
2504 err = nfs4_handle_exception(server, err, &exception);
2505 }
2506 } while (exception.retry);
2507 out:
2508 return err;
2509 }
2510
2511 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2512 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2513 {
2514 struct rpc_auth *auth;
2515 int ret;
2516
2517 auth = rpcauth_create(flavor, server->client);
2518 if (IS_ERR(auth)) {
2519 ret = -EIO;
2520 goto out;
2521 }
2522 ret = nfs4_lookup_root(server, fhandle, info);
2523 out:
2524 return ret;
2525 }
2526
2527 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2528 struct nfs_fsinfo *info)
2529 {
2530 int i, len, status = 0;
2531 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2532
2533 len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
2534 if (len < 0)
2535 return len;
2536
2537 for (i = 0; i < len; i++) {
2538 /* AUTH_UNIX is the default flavor if none was specified,
2539 * and thus has already been tried. */
2540 if (flav_array[i] == RPC_AUTH_UNIX)
2541 continue;
2542
2543 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2544 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2545 continue;
2546 break;
2547 }
2548 /*
2549 * -EACCES could mean that the user doesn't have correct permissions
2550 * to access the mount. It could also mean that we tried to mount
2551 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2552 * existing mount programs don't handle -EACCES very well so it should
2553 * be mapped to -EPERM instead.
2554 */
2555 if (status == -EACCES)
2556 status = -EPERM;
2557 return status;
2558 }
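/*
 * Illustration: rpcauth_list_flavors() might hand back
 * { RPC_AUTH_GSS_KRB5, RPC_AUTH_GSS_KRB5I, RPC_AUTH_UNIX }.  AUTH_UNIX
 * was already tried by the plain lookup_root, so only the two krb5
 * flavors are retried above, stopping at the first one the server does
 * not reject with NFS4ERR_WRONGSEC or EACCES.
 */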
2559
2560 /*
2561 * get the file handle for the "/" directory on the server
2562 */
2563 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2564 struct nfs_fsinfo *info)
2565 {
2566 int minor_version = server->nfs_client->cl_minorversion;
2567 int status = nfs4_lookup_root(server, fhandle, info);
2568 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2569 /*
2570 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2571 * by nfs4_map_errors() as this function exits.
2572 */
2573 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2574 if (status == 0)
2575 status = nfs4_server_capabilities(server, fhandle);
2576 if (status == 0)
2577 status = nfs4_do_fsinfo(server, fhandle, info);
2578 return nfs4_map_errors(status);
2579 }
2580
2581 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2582 struct nfs_fsinfo *info)
2583 {
2584 int error;
2585 struct nfs_fattr *fattr = info->fattr;
2586
2587 error = nfs4_server_capabilities(server, mntfh);
2588 if (error < 0) {
2589 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2590 return error;
2591 }
2592
2593 error = nfs4_proc_getattr(server, mntfh, fattr);
2594 if (error < 0) {
2595 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2596 return error;
2597 }
2598
2599 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2600 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2601 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2602
2603 return error;
2604 }
2605
2606 /*
2607 * Get locations and (maybe) other attributes of a referral.
2608 * Note that we'll actually follow the referral later when
2609 * we detect fsid mismatch in inode revalidation
2610 */
2611 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2612 const struct qstr *name, struct nfs_fattr *fattr,
2613 struct nfs_fh *fhandle)
2614 {
2615 int status = -ENOMEM;
2616 struct page *page = NULL;
2617 struct nfs4_fs_locations *locations = NULL;
2618
2619 page = alloc_page(GFP_KERNEL);
2620 if (page == NULL)
2621 goto out;
2622 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2623 if (locations == NULL)
2624 goto out;
2625
2626 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2627 if (status != 0)
2628 goto out;
2629 /* Make sure server returned a different fsid for the referral */
2630 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2631 dprintk("%s: server did not return a different fsid for"
2632 " a referral at %s\n", __func__, name->name);
2633 status = -EIO;
2634 goto out;
2635 }
2636 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2637 nfs_fixup_referral_attributes(&locations->fattr);
2638
2639 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2640 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2641 memset(fhandle, 0, sizeof(struct nfs_fh));
2642 out:
2643 if (page)
2644 __free_page(page);
2645 kfree(locations);
2646 return status;
2647 }
2648
2649 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2650 {
2651 struct nfs4_getattr_arg args = {
2652 .fh = fhandle,
2653 .bitmask = server->attr_bitmask,
2654 };
2655 struct nfs4_getattr_res res = {
2656 .fattr = fattr,
2657 .server = server,
2658 };
2659 struct rpc_message msg = {
2660 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2661 .rpc_argp = &args,
2662 .rpc_resp = &res,
2663 };
2664
2665 nfs_fattr_init(fattr);
2666 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2667 }
2668
2669 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2670 {
2671 struct nfs4_exception exception = { };
2672 int err;
2673 do {
2674 err = nfs4_handle_exception(server,
2675 _nfs4_proc_getattr(server, fhandle, fattr),
2676 &exception);
2677 } while (exception.retry);
2678 return err;
2679 }
2680
2681 /*
2682 * The file is not closed if it is opened due to a request to change
2683 * the size of the file. The open call will not be needed once the
2684 * VFS layer lookup-intents are implemented.
2685 *
2686 * Close is called when the inode is destroyed.
2687 * If we haven't opened the file for O_WRONLY, we
2688 * need to do so in the size_change case to obtain a stateid.
2689 *
2690 * Got race?
2691 * Because OPEN is always done by name in nfsv4, it is
2692 * possible that we opened a different file by the same
2693 * name. We can recognize this race condition, but we
2694 * can't do anything about it besides returning an error.
2695 *
2696 * This will be fixed with VFS changes (lookup-intent).
2697 */
2698 static int
2699 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2700 struct iattr *sattr)
2701 {
2702 struct inode *inode = dentry->d_inode;
2703 struct rpc_cred *cred = NULL;
2704 struct nfs4_state *state = NULL;
2705 int status;
2706
2707 if (pnfs_ld_layoutret_on_setattr(inode))
2708 pnfs_return_layout(inode);
2709
2710 nfs_fattr_init(fattr);
2711
2712 /* Deal with open(O_TRUNC) */
2713 if (sattr->ia_valid & ATTR_OPEN)
2714 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2715
2716 /* Optimization: if the end result is no change, don't RPC */
2717 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2718 return 0;
2719
2720 /* Search for an existing file that is already open for writing */
2721 if (sattr->ia_valid & ATTR_FILE) {
2722 struct nfs_open_context *ctx;
2723
2724 ctx = nfs_file_open_context(sattr->ia_file);
2725 if (ctx) {
2726 cred = ctx->cred;
2727 state = ctx->state;
2728 }
2729 }
2730
2731 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2732 if (status == 0)
2733 nfs_setattr_update_inode(inode, sattr);
2734 return status;
2735 }
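/*
 * Example (sketch): an open(path, O_WRONLY|O_TRUNC) from userspace
 * typically arrives here with ATTR_SIZE|ATTR_MTIME|ATTR_CTIME|
 * ATTR_OPEN|ATTR_FILE set.  The ATTR_OPEN check above strips the
 * timestamp bits, ATTR_FILE lets us borrow the credential and open
 * state from the file's open context, and the truncate then goes out
 * as a SETATTR under that stateid.
 */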
2736
2737 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2738 const struct qstr *name, struct nfs_fh *fhandle,
2739 struct nfs_fattr *fattr)
2740 {
2741 struct nfs_server *server = NFS_SERVER(dir);
2742 int status;
2743 struct nfs4_lookup_arg args = {
2744 .bitmask = server->attr_bitmask,
2745 .dir_fh = NFS_FH(dir),
2746 .name = name,
2747 };
2748 struct nfs4_lookup_res res = {
2749 .server = server,
2750 .fattr = fattr,
2751 .fh = fhandle,
2752 };
2753 struct rpc_message msg = {
2754 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2755 .rpc_argp = &args,
2756 .rpc_resp = &res,
2757 };
2758
2759 nfs_fattr_init(fattr);
2760
2761 dprintk("NFS call lookup %s\n", name->name);
2762 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2763 dprintk("NFS reply lookup: %d\n", status);
2764 return status;
2765 }
2766
2767 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2768 {
2769 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2770 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2771 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2772 fattr->nlink = 2;
2773 }
2774
2775 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2776 struct qstr *name, struct nfs_fh *fhandle,
2777 struct nfs_fattr *fattr)
2778 {
2779 struct nfs4_exception exception = { };
2780 struct rpc_clnt *client = *clnt;
2781 int err;
2782 do {
2783 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2784 switch (err) {
2785 case -NFS4ERR_BADNAME:
2786 err = -ENOENT;
2787 goto out;
2788 case -NFS4ERR_MOVED:
2789 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2790 goto out;
2791 case -NFS4ERR_WRONGSEC:
2792 err = -EPERM;
2793 if (client != *clnt)
2794 goto out;
2795
2796 client = nfs4_create_sec_client(client, dir, name);
2797 if (IS_ERR(client))
2798 return PTR_ERR(client);
2799
2800 exception.retry = 1;
2801 break;
2802 default:
2803 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2804 }
2805 } while (exception.retry);
2806
2807 out:
2808 if (err == 0)
2809 *clnt = client;
2810 else if (client != *clnt)
2811 rpc_shutdown_client(client);
2812
2813 return err;
2814 }
2815
2816 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2817 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2818 {
2819 int status;
2820 struct rpc_clnt *client = NFS_CLIENT(dir);
2821
2822 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2823 if (client != NFS_CLIENT(dir)) {
2824 rpc_shutdown_client(client);
2825 nfs_fixup_secinfo_attributes(fattr);
2826 }
2827 return status;
2828 }
2829
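/*
 * Like nfs4_proc_lookup(), but returns the rpc_clnt that was actually
 * used, which may be a clone rekeyed by nfs4_create_sec_client() if the
 * submount required a different security flavor.  The clone is shut
 * down again on error.
 */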
2830 struct rpc_clnt *
2831 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2832 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2833 {
2834 int status;
2835 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2836
2837 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2838 if (status < 0) {
2839 rpc_shutdown_client(client);
2840 return ERR_PTR(status);
2841 }
2842 return client;
2843 }
2844
2845 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2846 {
2847 struct nfs_server *server = NFS_SERVER(inode);
2848 struct nfs4_accessargs args = {
2849 .fh = NFS_FH(inode),
2850 .bitmask = server->cache_consistency_bitmask,
2851 };
2852 struct nfs4_accessres res = {
2853 .server = server,
2854 };
2855 struct rpc_message msg = {
2856 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2857 .rpc_argp = &args,
2858 .rpc_resp = &res,
2859 .rpc_cred = entry->cred,
2860 };
2861 int mode = entry->mask;
2862 int status;
2863
2864 /*
2865 * Determine which access bits we want to ask for...
2866 */
2867 if (mode & MAY_READ)
2868 args.access |= NFS4_ACCESS_READ;
2869 if (S_ISDIR(inode->i_mode)) {
2870 if (mode & MAY_WRITE)
2871 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2872 if (mode & MAY_EXEC)
2873 args.access |= NFS4_ACCESS_LOOKUP;
2874 } else {
2875 if (mode & MAY_WRITE)
2876 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2877 if (mode & MAY_EXEC)
2878 args.access |= NFS4_ACCESS_EXECUTE;
2879 }
2880
2881 res.fattr = nfs_alloc_fattr();
2882 if (res.fattr == NULL)
2883 return -ENOMEM;
2884
2885 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2886 if (!status) {
2887 nfs_access_set_mask(entry, res.access);
2888 nfs_refresh_inode(inode, res.fattr);
2889 }
2890 nfs_free_fattr(res.fattr);
2891 return status;
2892 }
2893
2894 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2895 {
2896 struct nfs4_exception exception = { };
2897 int err;
2898 do {
2899 err = nfs4_handle_exception(NFS_SERVER(inode),
2900 _nfs4_proc_access(inode, entry),
2901 &exception);
2902 } while (exception.retry);
2903 return err;
2904 }
2905
2906 /*
2907 * TODO: For the time being, we don't try to get any attributes
2908 * along with any of the zero-copy operations READ, READDIR,
2909 * READLINK, WRITE.
2910 *
2911 * In the case of the first three, we want to put the GETATTR
2912 * after the read-type operation -- this is because it is hard
2913 * to predict the length of a GETATTR response in v4, and thus
2914 * align the READ data correctly. This means that the GETATTR
2915 * may end up partially falling into the page cache, and we should
2916 * shift it into the 'tail' of the xdr_buf before processing.
2917 * To do this efficiently, we need to know the total length
2918 * of data received, which doesn't seem to be available outside
2919 * of the RPC layer.
2920 *
2921 * In the case of WRITE, we also want to put the GETATTR after
2922 * the operation -- in this case because we want to make sure
2923 * we get the post-operation mtime and size.
2924 *
2925 * Both of these changes to the XDR layer would in fact be quite
2926 * minor, but I decided to leave them for a subsequent patch.
2927 */
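/*
 * Concretely, the read compound we would like to send is roughly
 * PUTFH; READ; GETATTR.  Because the READ payload is decoded straight
 * into the caller's pages, a trailing GETATTR of unpredictable length
 * can start inside those pages and would have to be shifted into the
 * xdr_buf tail, which is why the attributes are skipped for now.
 */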
2928 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2929 unsigned int pgbase, unsigned int pglen)
2930 {
2931 struct nfs4_readlink args = {
2932 .fh = NFS_FH(inode),
2933 .pgbase = pgbase,
2934 .pglen = pglen,
2935 .pages = &page,
2936 };
2937 struct nfs4_readlink_res res;
2938 struct rpc_message msg = {
2939 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2940 .rpc_argp = &args,
2941 .rpc_resp = &res,
2942 };
2943
2944 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2945 }
2946
2947 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2948 unsigned int pgbase, unsigned int pglen)
2949 {
2950 struct nfs4_exception exception = { };
2951 int err;
2952 do {
2953 err = nfs4_handle_exception(NFS_SERVER(inode),
2954 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2955 &exception);
2956 } while (exception.retry);
2957 return err;
2958 }
2959
2960 /*
2961 * This is just for mknod. open(O_CREAT) will always do ->open_context().
2962 */
2963 static int
2964 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2965 int flags)
2966 {
2967 struct nfs_open_context *ctx;
2968 struct nfs4_state *state;
2969 int status = 0;
2970
2971 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
2972 if (IS_ERR(ctx))
2973 return PTR_ERR(ctx);
2974
2975 sattr->ia_mode &= ~current_umask();
2976 state = nfs4_do_open(dir, dentry, ctx->mode,
2977 flags, sattr, ctx->cred,
2978 &ctx->mdsthreshold);
2979 d_drop(dentry);
2980 if (IS_ERR(state)) {
2981 status = PTR_ERR(state);
2982 goto out;
2983 }
2984 d_add(dentry, igrab(state->inode));
2985 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2986 ctx->state = state;
2987 out:
2988 put_nfs_open_context(ctx);
2989 return status;
2990 }
2991
2992 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2993 {
2994 struct nfs_server *server = NFS_SERVER(dir);
2995 struct nfs_removeargs args = {
2996 .fh = NFS_FH(dir),
2997 .name = *name,
2998 };
2999 struct nfs_removeres res = {
3000 .server = server,
3001 };
3002 struct rpc_message msg = {
3003 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3004 .rpc_argp = &args,
3005 .rpc_resp = &res,
3006 };
3007 int status;
3008
3009 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3010 if (status == 0)
3011 update_changeattr(dir, &res.cinfo);
3012 return status;
3013 }
3014
3015 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3016 {
3017 struct nfs4_exception exception = { };
3018 int err;
3019 do {
3020 err = nfs4_handle_exception(NFS_SERVER(dir),
3021 _nfs4_proc_remove(dir, name),
3022 &exception);
3023 } while (exception.retry);
3024 return err;
3025 }
3026
3027 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3028 {
3029 struct nfs_server *server = NFS_SERVER(dir);
3030 struct nfs_removeargs *args = msg->rpc_argp;
3031 struct nfs_removeres *res = msg->rpc_resp;
3032
3033 res->server = server;
3034 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3035 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
3036 }
3037
3038 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3039 {
3040 if (nfs4_setup_sequence(NFS_SERVER(data->dir),
3041 &data->args.seq_args,
3042 &data->res.seq_res,
3043 task))
3044 return;
3045 rpc_call_start(task);
3046 }
3047
3048 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3049 {
3050 struct nfs_removeres *res = task->tk_msg.rpc_resp;
3051
3052 if (!nfs4_sequence_done(task, &res->seq_res))
3053 return 0;
3054 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3055 return 0;
3056 update_changeattr(dir, &res->cinfo);
3057 return 1;
3058 }
3059
3060 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3061 {
3062 struct nfs_server *server = NFS_SERVER(dir);
3063 struct nfs_renameargs *arg = msg->rpc_argp;
3064 struct nfs_renameres *res = msg->rpc_resp;
3065
3066 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3067 res->server = server;
3068 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
3069 }
3070
3071 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3072 {
3073 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3074 &data->args.seq_args,
3075 &data->res.seq_res,
3076 task))
3077 return;
3078 rpc_call_start(task);
3079 }
3080
3081 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3082 struct inode *new_dir)
3083 {
3084 struct nfs_renameres *res = task->tk_msg.rpc_resp;
3085
3086 if (!nfs4_sequence_done(task, &res->seq_res))
3087 return 0;
3088 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3089 return 0;
3090
3091 update_changeattr(old_dir, &res->old_cinfo);
3092 update_changeattr(new_dir, &res->new_cinfo);
3093 return 1;
3094 }
3095
3096 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3097 struct inode *new_dir, struct qstr *new_name)
3098 {
3099 struct nfs_server *server = NFS_SERVER(old_dir);
3100 struct nfs_renameargs arg = {
3101 .old_dir = NFS_FH(old_dir),
3102 .new_dir = NFS_FH(new_dir),
3103 .old_name = old_name,
3104 .new_name = new_name,
3105 };
3106 struct nfs_renameres res = {
3107 .server = server,
3108 };
3109 struct rpc_message msg = {
3110 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
3111 .rpc_argp = &arg,
3112 .rpc_resp = &res,
3113 };
3114 int status;
3115
3116 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3117 if (!status) {
3118 update_changeattr(old_dir, &res.old_cinfo);
3119 update_changeattr(new_dir, &res.new_cinfo);
3120 }
3121 return status;
3122 }
3123
3124 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3125 struct inode *new_dir, struct qstr *new_name)
3126 {
3127 struct nfs4_exception exception = { };
3128 int err;
3129 do {
3130 err = nfs4_handle_exception(NFS_SERVER(old_dir),
3131 _nfs4_proc_rename(old_dir, old_name,
3132 new_dir, new_name),
3133 &exception);
3134 } while (exception.retry);
3135 return err;
3136 }
3137
3138 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3139 {
3140 struct nfs_server *server = NFS_SERVER(inode);
3141 struct nfs4_link_arg arg = {
3142 .fh = NFS_FH(inode),
3143 .dir_fh = NFS_FH(dir),
3144 .name = name,
3145 .bitmask = server->attr_bitmask,
3146 };
3147 struct nfs4_link_res res = {
3148 .server = server,
3149 };
3150 struct rpc_message msg = {
3151 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3152 .rpc_argp = &arg,
3153 .rpc_resp = &res,
3154 };
3155 int status = -ENOMEM;
3156
3157 res.fattr = nfs_alloc_fattr();
3158 if (res.fattr == NULL)
3159 goto out;
3160
3161 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3162 if (!status) {
3163 update_changeattr(dir, &res.cinfo);
3164 nfs_post_op_update_inode(inode, res.fattr);
3165 }
3166 out:
3167 nfs_free_fattr(res.fattr);
3168 return status;
3169 }
3170
3171 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3172 {
3173 struct nfs4_exception exception = { };
3174 int err;
3175 do {
3176 err = nfs4_handle_exception(NFS_SERVER(inode),
3177 _nfs4_proc_link(inode, dir, name),
3178 &exception);
3179 } while (exception.retry);
3180 return err;
3181 }
3182
3183 struct nfs4_createdata {
3184 struct rpc_message msg;
3185 struct nfs4_create_arg arg;
3186 struct nfs4_create_res res;
3187 struct nfs_fh fh;
3188 struct nfs_fattr fattr;
3189 };
3190
3191 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3192 struct qstr *name, struct iattr *sattr, u32 ftype)
3193 {
3194 struct nfs4_createdata *data;
3195
3196 data = kzalloc(sizeof(*data), GFP_KERNEL);
3197 if (data != NULL) {
3198 struct nfs_server *server = NFS_SERVER(dir);
3199
3200 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3201 data->msg.rpc_argp = &data->arg;
3202 data->msg.rpc_resp = &data->res;
3203 data->arg.dir_fh = NFS_FH(dir);
3204 data->arg.server = server;
3205 data->arg.name = name;
3206 data->arg.attrs = sattr;
3207 data->arg.ftype = ftype;
3208 data->arg.bitmask = server->attr_bitmask;
3209 data->res.server = server;
3210 data->res.fh = &data->fh;
3211 data->res.fattr = &data->fattr;
3212 nfs_fattr_init(data->res.fattr);
3213 }
3214 return data;
3215 }
3216
3217 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3218 {
3219 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3220 &data->arg.seq_args, &data->res.seq_res, 1);
3221 if (status == 0) {
3222 update_changeattr(dir, &data->res.dir_cinfo);
3223 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3224 }
3225 return status;
3226 }
3227
3228 static void nfs4_free_createdata(struct nfs4_createdata *data)
3229 {
3230 kfree(data);
3231 }
3232
3233 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3234 struct page *page, unsigned int len, struct iattr *sattr)
3235 {
3236 struct nfs4_createdata *data;
3237 int status = -ENAMETOOLONG;
3238
3239 if (len > NFS4_MAXPATHLEN)
3240 goto out;
3241
3242 status = -ENOMEM;
3243 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3244 if (data == NULL)
3245 goto out;
3246
3247 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3248 data->arg.u.symlink.pages = &page;
3249 data->arg.u.symlink.len = len;
3250
3251 status = nfs4_do_create(dir, dentry, data);
3252
3253 nfs4_free_createdata(data);
3254 out:
3255 return status;
3256 }
3257
3258 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3259 struct page *page, unsigned int len, struct iattr *sattr)
3260 {
3261 struct nfs4_exception exception = { };
3262 int err;
3263 do {
3264 err = nfs4_handle_exception(NFS_SERVER(dir),
3265 _nfs4_proc_symlink(dir, dentry, page,
3266 len, sattr),
3267 &exception);
3268 } while (exception.retry);
3269 return err;
3270 }
3271
3272 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3273 struct iattr *sattr)
3274 {
3275 struct nfs4_createdata *data;
3276 int status = -ENOMEM;
3277
3278 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3279 if (data == NULL)
3280 goto out;
3281
3282 status = nfs4_do_create(dir, dentry, data);
3283
3284 nfs4_free_createdata(data);
3285 out:
3286 return status;
3287 }
3288
3289 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3290 struct iattr *sattr)
3291 {
3292 struct nfs4_exception exception = { };
3293 int err;
3294
3295 sattr->ia_mode &= ~current_umask();
3296 do {
3297 err = nfs4_handle_exception(NFS_SERVER(dir),
3298 _nfs4_proc_mkdir(dir, dentry, sattr),
3299 &exception);
3300 } while (exception.retry);
3301 return err;
3302 }
3303
3304 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3305 u64 cookie, struct page **pages, unsigned int count, int plus)
3306 {
3307 struct inode *dir = dentry->d_inode;
3308 struct nfs4_readdir_arg args = {
3309 .fh = NFS_FH(dir),
3310 .pages = pages,
3311 .pgbase = 0,
3312 .count = count,
3313 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3314 .plus = plus,
3315 };
3316 struct nfs4_readdir_res res;
3317 struct rpc_message msg = {
3318 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3319 .rpc_argp = &args,
3320 .rpc_resp = &res,
3321 .rpc_cred = cred,
3322 };
3323 int status;
3324
3325 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3326 dentry->d_parent->d_name.name,
3327 dentry->d_name.name,
3328 (unsigned long long)cookie);
3329 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3330 res.pgbase = args.pgbase;
3331 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3332 if (status >= 0) {
3333 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3334 status += args.pgbase;
3335 }
3336
3337 nfs_invalidate_atime(dir);
3338
3339 dprintk("%s: returns %d\n", __func__, status);
3340 return status;
3341 }
3342
3343 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3344 u64 cookie, struct page **pages, unsigned int count, int plus)
3345 {
3346 struct nfs4_exception exception = { };
3347 int err;
3348 do {
3349 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3350 _nfs4_proc_readdir(dentry, cred, cookie,
3351 pages, count, plus),
3352 &exception);
3353 } while (exception.retry);
3354 return err;
3355 }
3356
3357 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3358 struct iattr *sattr, dev_t rdev)
3359 {
3360 struct nfs4_createdata *data;
3361 int mode = sattr->ia_mode;
3362 int status = -ENOMEM;
3363
3364 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3365 if (data == NULL)
3366 goto out;
3367
3368 if (S_ISFIFO(mode))
3369 data->arg.ftype = NF4FIFO;
3370 else if (S_ISBLK(mode)) {
3371 data->arg.ftype = NF4BLK;
3372 data->arg.u.device.specdata1 = MAJOR(rdev);
3373 data->arg.u.device.specdata2 = MINOR(rdev);
3374 }
3375 else if (S_ISCHR(mode)) {
3376 data->arg.ftype = NF4CHR;
3377 data->arg.u.device.specdata1 = MAJOR(rdev);
3378 data->arg.u.device.specdata2 = MINOR(rdev);
3379 } else if (!S_ISSOCK(mode)) {
3380 status = -EINVAL;
3381 goto out_free;
3382 }
3383
3384 status = nfs4_do_create(dir, dentry, data);
3385 out_free:
3386 nfs4_free_createdata(data);
3387 out:
3388 return status;
3389 }
3390
3391 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3392 struct iattr *sattr, dev_t rdev)
3393 {
3394 struct nfs4_exception exception = { };
3395 int err;
3396
3397 sattr->ia_mode &= ~current_umask();
3398 do {
3399 err = nfs4_handle_exception(NFS_SERVER(dir),
3400 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3401 &exception);
3402 } while (exception.retry);
3403 return err;
3404 }
3405
3406 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3407 struct nfs_fsstat *fsstat)
3408 {
3409 struct nfs4_statfs_arg args = {
3410 .fh = fhandle,
3411 .bitmask = server->attr_bitmask,
3412 };
3413 struct nfs4_statfs_res res = {
3414 .fsstat = fsstat,
3415 };
3416 struct rpc_message msg = {
3417 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3418 .rpc_argp = &args,
3419 .rpc_resp = &res,
3420 };
3421
3422 nfs_fattr_init(fsstat->fattr);
3423 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3424 }
3425
3426 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3427 {
3428 struct nfs4_exception exception = { };
3429 int err;
3430 do {
3431 err = nfs4_handle_exception(server,
3432 _nfs4_proc_statfs(server, fhandle, fsstat),
3433 &exception);
3434 } while (exception.retry);
3435 return err;
3436 }
3437
3438 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3439 struct nfs_fsinfo *fsinfo)
3440 {
3441 struct nfs4_fsinfo_arg args = {
3442 .fh = fhandle,
3443 .bitmask = server->attr_bitmask,
3444 };
3445 struct nfs4_fsinfo_res res = {
3446 .fsinfo = fsinfo,
3447 };
3448 struct rpc_message msg = {
3449 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3450 .rpc_argp = &args,
3451 .rpc_resp = &res,
3452 };
3453
3454 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3455 }
3456
3457 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3458 {
3459 struct nfs4_exception exception = { };
3460 int err;
3461
3462 do {
3463 err = nfs4_handle_exception(server,
3464 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3465 &exception);
3466 } while (exception.retry);
3467 return err;
3468 }
3469
3470 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3471 {
3472 int error;
3473
3474 nfs_fattr_init(fsinfo->fattr);
3475 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
3476 if (error == 0) {
3477 /* block layout checks this! */
3478 server->pnfs_blksize = fsinfo->blksize;
3479 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
3480 }
3481
3482 return error;
3483 }
3484
3485 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3486 struct nfs_pathconf *pathconf)
3487 {
3488 struct nfs4_pathconf_arg args = {
3489 .fh = fhandle,
3490 .bitmask = server->attr_bitmask,
3491 };
3492 struct nfs4_pathconf_res res = {
3493 .pathconf = pathconf,
3494 };
3495 struct rpc_message msg = {
3496 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3497 .rpc_argp = &args,
3498 .rpc_resp = &res,
3499 };
3500
3501 /* None of the pathconf attributes are mandatory to implement */
3502 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3503 memset(pathconf, 0, sizeof(*pathconf));
3504 return 0;
3505 }
3506
3507 nfs_fattr_init(pathconf->fattr);
3508 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3509 }
3510
3511 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3512 struct nfs_pathconf *pathconf)
3513 {
3514 struct nfs4_exception exception = { };
3515 int err;
3516
3517 do {
3518 err = nfs4_handle_exception(server,
3519 _nfs4_proc_pathconf(server, fhandle, pathconf),
3520 &exception);
3521 } while (exception.retry);
3522 return err;
3523 }
3524
3525 void __nfs4_read_done_cb(struct nfs_read_data *data)
3526 {
3527 nfs_invalidate_atime(data->header->inode);
3528 }
3529
3530 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3531 {
3532 struct nfs_server *server = NFS_SERVER(data->header->inode);
3533
3534 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3535 rpc_restart_call_prepare(task);
3536 return -EAGAIN;
3537 }
3538
3539 __nfs4_read_done_cb(data);
3540 if (task->tk_status > 0)
3541 renew_lease(server, data->timestamp);
3542 return 0;
3543 }
3544
3545 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3546 {
3547
3548 dprintk("--> %s\n", __func__);
3549
3550 if (!nfs4_sequence_done(task, &data->res.seq_res))
3551 return -EAGAIN;
3552
3553 return data->read_done_cb ? data->read_done_cb(task, data) :
3554 nfs4_read_done_cb(task, data);
3555 }
3556
3557 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3558 {
3559 data->timestamp = jiffies;
3560 data->read_done_cb = nfs4_read_done_cb;
3561 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3562 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3563 }
3564
3565 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3566 {
3567 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3568 &data->args.seq_args,
3569 &data->res.seq_res,
3570 task))
3571 return;
3572 rpc_call_start(task);
3573 }
3574
3575 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3576 {
3577 struct inode *inode = data->header->inode;
3578
3579 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3580 rpc_restart_call_prepare(task);
3581 return -EAGAIN;
3582 }
3583 if (task->tk_status >= 0) {
3584 renew_lease(NFS_SERVER(inode), data->timestamp);
3585 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3586 }
3587 return 0;
3588 }
3589
3590 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3591 {
3592 if (!nfs4_sequence_done(task, &data->res.seq_res))
3593 return -EAGAIN;
3594 return data->write_done_cb ? data->write_done_cb(task, data) :
3595 nfs4_write_done_cb(task, data);
3596 }
3597
3598 static
3599 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3600 {
3601 const struct nfs_pgio_header *hdr = data->header;
3602
3603 /* Don't request attributes for pNFS or O_DIRECT writes */
3604 if (data->ds_clp != NULL || hdr->dreq != NULL)
3605 return false;
3606 /* Otherwise, request attributes if and only if we don't hold
3607 * a delegation
3608 */
3609 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
3610 }
3611
3612 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3613 {
3614 struct nfs_server *server = NFS_SERVER(data->header->inode);
3615
3616 if (!nfs4_write_need_cache_consistency_data(data)) {
3617 data->args.bitmask = NULL;
3618 data->res.fattr = NULL;
3619 } else
3620 data->args.bitmask = server->cache_consistency_bitmask;
3621
3622 if (!data->write_done_cb)
3623 data->write_done_cb = nfs4_write_done_cb;
3624 data->res.server = server;
3625 data->timestamp = jiffies;
3626
3627 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3628 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3629 }
3630
3631 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3632 {
3633 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3634 &data->args.seq_args,
3635 &data->res.seq_res,
3636 task))
3637 return;
3638 rpc_call_start(task);
3639 }
3640
3641 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3642 {
3643 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3644 &data->args.seq_args,
3645 &data->res.seq_res,
3646 task))
3647 return;
3648 rpc_call_start(task);
3649 }
3650
3651 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3652 {
3653 struct inode *inode = data->inode;
3654
3655 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3656 rpc_restart_call_prepare(task);
3657 return -EAGAIN;
3658 }
3659 return 0;
3660 }
3661
3662 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3663 {
3664 if (!nfs4_sequence_done(task, &data->res.seq_res))
3665 return -EAGAIN;
3666 return data->commit_done_cb(task, data);
3667 }
3668
3669 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3670 {
3671 struct nfs_server *server = NFS_SERVER(data->inode);
3672
3673 if (data->commit_done_cb == NULL)
3674 data->commit_done_cb = nfs4_commit_done_cb;
3675 data->res.server = server;
3676 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3677 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3678 }
3679
3680 struct nfs4_renewdata {
3681 struct nfs_client *client;
3682 unsigned long timestamp;
3683 };
3684
3685 /*
3686 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3687 * standalone procedure for queueing an asynchronous RENEW.
3688 */
3689 static void nfs4_renew_release(void *calldata)
3690 {
3691 struct nfs4_renewdata *data = calldata;
3692 struct nfs_client *clp = data->client;
3693
3694 if (atomic_read(&clp->cl_count) > 1)
3695 nfs4_schedule_state_renewal(clp);
3696 nfs_put_client(clp);
3697 kfree(data);
3698 }
3699
3700 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3701 {
3702 struct nfs4_renewdata *data = calldata;
3703 struct nfs_client *clp = data->client;
3704 unsigned long timestamp = data->timestamp;
3705
3706 if (task->tk_status < 0) {
3707 /* Unless we're shutting down, schedule state recovery! */
3708 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3709 return;
3710 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3711 nfs4_schedule_lease_recovery(clp);
3712 return;
3713 }
3714 nfs4_schedule_path_down_recovery(clp);
3715 }
3716 do_renew_lease(clp, timestamp);
3717 }
3718
3719 static const struct rpc_call_ops nfs4_renew_ops = {
3720 .rpc_call_done = nfs4_renew_done,
3721 .rpc_release = nfs4_renew_release,
3722 };
3723
3724 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3725 {
3726 struct rpc_message msg = {
3727 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3728 .rpc_argp = clp,
3729 .rpc_cred = cred,
3730 };
3731 struct nfs4_renewdata *data;
3732
3733 if (renew_flags == 0)
3734 return 0;
3735 if (!atomic_inc_not_zero(&clp->cl_count))
3736 return -EIO;
3737 data = kmalloc(sizeof(*data), GFP_NOFS);
3738 if (data == NULL)
3739 return -ENOMEM;
3740 data->client = clp;
3741 data->timestamp = jiffies;
3742 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3743 &nfs4_renew_ops, data);
3744 }
3745
3746 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3747 {
3748 struct rpc_message msg = {
3749 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3750 .rpc_argp = clp,
3751 .rpc_cred = cred,
3752 };
3753 unsigned long now = jiffies;
3754 int status;
3755
3756 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3757 if (status < 0)
3758 return status;
3759 do_renew_lease(clp, now);
3760 return 0;
3761 }
3762
3763 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3764 {
3765 return (server->caps & NFS_CAP_ACLS)
3766 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3767 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3768 }
3769
3770 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
3771 * it's OK to put sizeof(struct page *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
3772 * the stack.
3773 */
3774 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
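/* For example, with XATTR_SIZE_MAX of 65536 and 4 KiB pages this works out
 * to 16 pages, i.e. 16 page pointers on the stack (PAGE_SIZE is
 * architecture dependent, so the exact count varies). */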
3775
3776 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3777 struct page **pages, unsigned int *pgbase)
3778 {
3779 struct page *newpage, **spages;
3780 int rc = 0;
3781 size_t len;
3782 spages = pages;
3783
3784 do {
3785 len = min_t(size_t, PAGE_SIZE, buflen);
3786 newpage = alloc_page(GFP_KERNEL);
3787
3788 if (newpage == NULL)
3789 goto unwind;
3790 memcpy(page_address(newpage), buf, len);
3791 buf += len;
3792 buflen -= len;
3793 *pages++ = newpage;
3794 rc++;
3795 } while (buflen != 0);
3796
3797 return rc;
3798
3799 unwind:
3800 for (; rc > 0; rc--)
3801 __free_page(spages[rc-1]);
3802 return -ENOMEM;
3803 }
3804
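/*
 * When 'cached' is set, 'data' holds up to a page of ACL bytes; when it is
 * clear, only 'len' (the ACL length reported by the server) is valid and a
 * later read must go back to the server.
 */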
3805 struct nfs4_cached_acl {
3806 int cached;
3807 size_t len;
3808 char data[0];
3809 };
3810
3811 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3812 {
3813 struct nfs_inode *nfsi = NFS_I(inode);
3814
3815 spin_lock(&inode->i_lock);
3816 kfree(nfsi->nfs4_acl);
3817 nfsi->nfs4_acl = acl;
3818 spin_unlock(&inode->i_lock);
3819 }
3820
3821 static void nfs4_zap_acl_attr(struct inode *inode)
3822 {
3823 nfs4_set_cached_acl(inode, NULL);
3824 }
3825
3826 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3827 {
3828 struct nfs_inode *nfsi = NFS_I(inode);
3829 struct nfs4_cached_acl *acl;
3830 int ret = -ENOENT;
3831
3832 spin_lock(&inode->i_lock);
3833 acl = nfsi->nfs4_acl;
3834 if (acl == NULL)
3835 goto out;
3836 if (buf == NULL) /* user is just asking for length */
3837 goto out_len;
3838 if (acl->cached == 0)
3839 goto out;
3840 ret = -ERANGE; /* see getxattr(2) man page */
3841 if (acl->len > buflen)
3842 goto out;
3843 memcpy(buf, acl->data, acl->len);
3844 out_len:
3845 ret = acl->len;
3846 out:
3847 spin_unlock(&inode->i_lock);
3848 return ret;
3849 }
3850
3851 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3852 {
3853 struct nfs4_cached_acl *acl;
3854 size_t buflen = sizeof(*acl) + acl_len;
3855
3856 if (buflen <= PAGE_SIZE) {
3857 acl = kmalloc(buflen, GFP_KERNEL);
3858 if (acl == NULL)
3859 goto out;
3860 acl->cached = 1;
3861 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3862 } else {
3863 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3864 if (acl == NULL)
3865 goto out;
3866 acl->cached = 0;
3867 }
3868 acl->len = acl_len;
3869 out:
3870 nfs4_set_cached_acl(inode, acl);
3871 }
3872
3873 /*
3874 * The getxattr API returns the required buffer length when called with a
3875 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3876 * the required buf. On a NULL buf, we request a page of ACL data from the
3877 * server, guessing that the ACL request can be serviced by a single page.
3878 * If so, we cache up to a page of ACL data, and the 2nd call to getxattr is
3879 * serviced by the cache. If not, we throw away the page, cache only the
3880 * required length, and the next getxattr call produces another round trip to
3881 * the server, this time with the input buf of the required size.
3882 */
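/*
 * The caller-side pattern this optimizes for looks roughly like (a sketch of
 * the usual getxattr(2) usage, not code from this file):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * so the first call below typically arrives with buf == NULL.
 */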
3883 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3884 {
3885 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3886 struct nfs_getaclargs args = {
3887 .fh = NFS_FH(inode),
3888 .acl_pages = pages,
3889 .acl_len = buflen,
3890 };
3891 struct nfs_getaclres res = {
3892 .acl_len = buflen,
3893 };
3894 struct rpc_message msg = {
3895 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3896 .rpc_argp = &args,
3897 .rpc_resp = &res,
3898 };
3899 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
3900 int ret = -ENOMEM, i;
3901
3902 /* As long as we're doing a round trip to the server anyway,
3903 * let's be prepared for a page of acl data. */
3904 if (npages == 0)
3905 npages = 1;
3906 if (npages > ARRAY_SIZE(pages))
3907 return -ERANGE;
3908
3909 for (i = 0; i < npages; i++) {
3910 pages[i] = alloc_page(GFP_KERNEL);
3911 if (!pages[i])
3912 goto out_free;
3913 }
3914
3915 /* for decoding across pages */
3916 res.acl_scratch = alloc_page(GFP_KERNEL);
3917 if (!res.acl_scratch)
3918 goto out_free;
3919
3920 args.acl_len = npages * PAGE_SIZE;
3921 args.acl_pgbase = 0;
3922
3923 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3924 __func__, buf, buflen, npages, args.acl_len);
3925 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3926 &msg, &args.seq_args, &res.seq_res, 0);
3927 if (ret)
3928 goto out_free;
3929
3930 /* Handle the case where the passed-in buffer is too short */
3931 if (res.acl_flags & NFS4_ACL_TRUNC) {
3932 /* Did the user only issue a request for the acl length? */
3933 if (buf == NULL)
3934 goto out_ok;
3935 ret = -ERANGE;
3936 goto out_free;
3937 }
3938 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
3939 if (buf)
3940 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
3941 out_ok:
3942 ret = res.acl_len;
3943 out_free:
3944 for (i = 0; i < npages; i++)
3945 if (pages[i])
3946 __free_page(pages[i]);
3947 if (res.acl_scratch)
3948 __free_page(res.acl_scratch);
3949 return ret;
3950 }
3951
3952 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3953 {
3954 struct nfs4_exception exception = { };
3955 ssize_t ret;
3956 do {
3957 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3958 if (ret >= 0)
3959 break;
3960 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3961 } while (exception.retry);
3962 return ret;
3963 }
3964
3965 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3966 {
3967 struct nfs_server *server = NFS_SERVER(inode);
3968 int ret;
3969
3970 if (!nfs4_server_supports_acls(server))
3971 return -EOPNOTSUPP;
3972 ret = nfs_revalidate_inode(server, inode);
3973 if (ret < 0)
3974 return ret;
3975 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3976 nfs_zap_acl_cache(inode);
3977 ret = nfs4_read_cached_acl(inode, buf, buflen);
3978 if (ret != -ENOENT)
3979 /* -ENOENT is returned if there is no ACL, or if there is an ACL
3980 * but no cached ACL data, only the cached ACL length */
3981 return ret;
3982 return nfs4_get_acl_uncached(inode, buf, buflen);
3983 }
3984
3985 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3986 {
3987 struct nfs_server *server = NFS_SERVER(inode);
3988 struct page *pages[NFS4ACL_MAXPAGES];
3989 struct nfs_setaclargs arg = {
3990 .fh = NFS_FH(inode),
3991 .acl_pages = pages,
3992 .acl_len = buflen,
3993 };
3994 struct nfs_setaclres res;
3995 struct rpc_message msg = {
3996 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3997 .rpc_argp = &arg,
3998 .rpc_resp = &res,
3999 };
4000 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4001 int ret, i;
4002
4003 if (!nfs4_server_supports_acls(server))
4004 return -EOPNOTSUPP;
4005 if (npages > ARRAY_SIZE(pages))
4006 return -ERANGE;
4007 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4008 if (i < 0)
4009 return i;
4010 nfs4_inode_return_delegation(inode);
4011 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4012
4013 /*
4014 * Free each page after tx, so the only ref left is
4015 * held by the network stack
4016 */
4017 for (; i > 0; i--)
4018 put_page(pages[i-1]);
4019
4020 /*
4021 * An ACL update can result in an inode attribute update,
4022 * so mark the attribute cache invalid.
4023 */
4024 spin_lock(&inode->i_lock);
4025 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4026 spin_unlock(&inode->i_lock);
4027 nfs_access_zap_cache(inode);
4028 nfs_zap_acl_cache(inode);
4029 return ret;
4030 }
4031
4032 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4033 {
4034 struct nfs4_exception exception = { };
4035 int err;
4036 do {
4037 err = nfs4_handle_exception(NFS_SERVER(inode),
4038 __nfs4_proc_set_acl(inode, buf, buflen),
4039 &exception);
4040 } while (exception.retry);
4041 return err;
4042 }
4043
4044 static int
4045 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
4046 {
4047 struct nfs_client *clp = server->nfs_client;
4048
4049 if (task->tk_status >= 0)
4050 return 0;
4051 switch (task->tk_status) {
4052 case -NFS4ERR_DELEG_REVOKED:
4053 case -NFS4ERR_ADMIN_REVOKED:
4054 case -NFS4ERR_BAD_STATEID:
4055 if (state == NULL)
4056 break;
4057 nfs_remove_bad_delegation(state->inode);
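/* fall through */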
4058 case -NFS4ERR_OPENMODE:
4059 if (state == NULL)
4060 break;
4061 nfs4_schedule_stateid_recovery(server, state);
4062 goto wait_on_recovery;
4063 case -NFS4ERR_EXPIRED:
4064 if (state != NULL)
4065 nfs4_schedule_stateid_recovery(server, state);
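/* fall through */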
4066 case -NFS4ERR_STALE_STATEID:
4067 case -NFS4ERR_STALE_CLIENTID:
4068 nfs4_schedule_lease_recovery(clp);
4069 goto wait_on_recovery;
4070 #if defined(CONFIG_NFS_V4_1)
4071 case -NFS4ERR_BADSESSION:
4072 case -NFS4ERR_BADSLOT:
4073 case -NFS4ERR_BAD_HIGH_SLOT:
4074 case -NFS4ERR_DEADSESSION:
4075 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4076 case -NFS4ERR_SEQ_FALSE_RETRY:
4077 case -NFS4ERR_SEQ_MISORDERED:
4078 dprintk("%s ERROR %d, Reset session\n", __func__,
4079 task->tk_status);
4080 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4081 task->tk_status = 0;
4082 return -EAGAIN;
4083 #endif /* CONFIG_NFS_V4_1 */
4084 case -NFS4ERR_DELAY:
4085 nfs_inc_server_stats(server, NFSIOS_DELAY);
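/* fall through */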
4086 case -NFS4ERR_GRACE:
4087 case -EKEYEXPIRED:
4088 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4089 task->tk_status = 0;
4090 return -EAGAIN;
4091 case -NFS4ERR_RETRY_UNCACHED_REP:
4092 case -NFS4ERR_OLD_STATEID:
4093 task->tk_status = 0;
4094 return -EAGAIN;
4095 }
4096 task->tk_status = nfs4_map_errors(task->tk_status);
4097 return 0;
4098 wait_on_recovery:
4099 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4100 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4101 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4102 task->tk_status = 0;
4103 return -EAGAIN;
4104 }
4105
4106 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4107 nfs4_verifier *bootverf)
4108 {
4109 __be32 verf[2];
4110
4111 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4112 /* An impossible timestamp guarantees this value
4113 * will never match a generated boot time. */
4114 verf[0] = 0;
4115 verf[1] = (__be32)(NSEC_PER_SEC + 1);
4116 } else {
4117 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4118 verf[0] = (__be32)nn->boot_time.tv_sec;
4119 verf[1] = (__be32)nn->boot_time.tv_nsec;
4120 }
4121 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4122 }
4123
4124 static unsigned int
4125 nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
4126 char *buf, size_t len)
4127 {
4128 unsigned int result;
4129
4130 rcu_read_lock();
4131 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4132 clp->cl_ipaddr,
4133 rpc_peeraddr2str(clp->cl_rpcclient,
4134 RPC_DISPLAY_ADDR),
4135 rpc_peeraddr2str(clp->cl_rpcclient,
4136 RPC_DISPLAY_PROTO));
4137 rcu_read_unlock();
4138 return result;
4139 }
4140
4141 static unsigned int
4142 nfs4_init_uniform_client_string(const struct nfs_client *clp,
4143 char *buf, size_t len)
4144 {
4145 char *nodename = clp->cl_rpcclient->cl_nodename;
4146
4147 if (nfs4_client_id_uniquifier[0] != '\0')
4148 nodename = nfs4_client_id_uniquifier;
4149 return scnprintf(buf, len, "Linux NFSv%u.%u %s",
4150 clp->rpc_ops->version, clp->cl_minorversion,
4151 nodename);
4152 }
4153
4154 /**
4155 * nfs4_proc_setclientid - Negotiate client ID
4156 * @clp: state data structure
4157 * @program: RPC program for NFSv4 callback service
4158 * @port: IP port number for NFS4 callback service
4159 * @cred: RPC credential to use for this call
4160 * @res: where to place the result
4161 *
4162 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4163 */
4164 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
4165 unsigned short port, struct rpc_cred *cred,
4166 struct nfs4_setclientid_res *res)
4167 {
4168 nfs4_verifier sc_verifier;
4169 struct nfs4_setclientid setclientid = {
4170 .sc_verifier = &sc_verifier,
4171 .sc_prog = program,
4172 .sc_cb_ident = clp->cl_cb_ident,
4173 };
4174 struct rpc_message msg = {
4175 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
4176 .rpc_argp = &setclientid,
4177 .rpc_resp = res,
4178 .rpc_cred = cred,
4179 };
4180 int status;
4181
4182 /* nfs_client_id4 */
4183 nfs4_init_boot_verifier(clp, &sc_verifier);
4184 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
4185 setclientid.sc_name_len =
4186 nfs4_init_uniform_client_string(clp,
4187 setclientid.sc_name,
4188 sizeof(setclientid.sc_name));
4189 else
4190 setclientid.sc_name_len =
4191 nfs4_init_nonuniform_client_string(clp,
4192 setclientid.sc_name,
4193 sizeof(setclientid.sc_name));
4194 /* cb_client4 */
4195 rcu_read_lock();
4196 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4197 sizeof(setclientid.sc_netid),
4198 rpc_peeraddr2str(clp->cl_rpcclient,
4199 RPC_DISPLAY_NETID));
4200 rcu_read_unlock();
4201 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4202 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4203 clp->cl_ipaddr, port >> 8, port & 255);
4204
4205 dprintk("NFS call setclientid auth=%s, '%.*s'\n",
4206 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4207 setclientid.sc_name_len, setclientid.sc_name);
4208 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4209 dprintk("NFS reply setclientid: %d\n", status);
4210 return status;
4211 }
4212
4213 /**
4214 * nfs4_proc_setclientid_confirm - Confirm client ID
4215 * @clp: state data structure
4216 * @res: result of a previous SETCLIENTID
4217 * @cred: RPC credential to use for this call
4218 *
4219 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4220 */
4221 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4222 struct nfs4_setclientid_res *arg,
4223 struct rpc_cred *cred)
4224 {
4225 struct nfs_fsinfo fsinfo;
4226 struct rpc_message msg = {
4227 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4228 .rpc_argp = arg,
4229 .rpc_resp = &fsinfo,
4230 .rpc_cred = cred,
4231 };
4232 unsigned long now;
4233 int status;
4234
4235 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
4236 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4237 clp->cl_clientid);
4238 now = jiffies;
4239 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4240 if (status == 0) {
4241 spin_lock(&clp->cl_lock);
4242 clp->cl_lease_time = fsinfo.lease_time * HZ;
4243 clp->cl_last_renewal = now;
4244 spin_unlock(&clp->cl_lock);
4245 }
4246 dprintk("NFS reply setclientid_confirm: %d\n", status);
4247 return status;
4248 }
4249
4250 struct nfs4_delegreturndata {
4251 struct nfs4_delegreturnargs args;
4252 struct nfs4_delegreturnres res;
4253 struct nfs_fh fh;
4254 nfs4_stateid stateid;
4255 unsigned long timestamp;
4256 struct nfs_fattr fattr;
4257 int rpc_status;
4258 };
4259
4260 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4261 {
4262 struct nfs4_delegreturndata *data = calldata;
4263
4264 if (!nfs4_sequence_done(task, &data->res.seq_res))
4265 return;
4266
4267 switch (task->tk_status) {
4268 case -NFS4ERR_STALE_STATEID:
4269 case -NFS4ERR_EXPIRED:
4270 case 0:
4271 renew_lease(data->res.server, data->timestamp);
4272 break;
4273 default:
4274 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4275 -EAGAIN) {
4276 rpc_restart_call_prepare(task);
4277 return;
4278 }
4279 }
4280 data->rpc_status = task->tk_status;
4281 }
4282
4283 static void nfs4_delegreturn_release(void *calldata)
4284 {
4285 kfree(calldata);
4286 }
4287
4288 #if defined(CONFIG_NFS_V4_1)
4289 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4290 {
4291 struct nfs4_delegreturndata *d_data;
4292
4293 d_data = (struct nfs4_delegreturndata *)data;
4294
4295 if (nfs4_setup_sequence(d_data->res.server,
4296 &d_data->args.seq_args,
4297 &d_data->res.seq_res, task))
4298 return;
4299 rpc_call_start(task);
4300 }
4301 #endif /* CONFIG_NFS_V4_1 */
4302
4303 static const struct rpc_call_ops nfs4_delegreturn_ops = {
4304 #if defined(CONFIG_NFS_V4_1)
4305 .rpc_call_prepare = nfs4_delegreturn_prepare,
4306 #endif /* CONFIG_NFS_V4_1 */
4307 .rpc_call_done = nfs4_delegreturn_done,
4308 .rpc_release = nfs4_delegreturn_release,
4309 };
4310
4311 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4312 {
4313 struct nfs4_delegreturndata *data;
4314 struct nfs_server *server = NFS_SERVER(inode);
4315 struct rpc_task *task;
4316 struct rpc_message msg = {
4317 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4318 .rpc_cred = cred,
4319 };
4320 struct rpc_task_setup task_setup_data = {
4321 .rpc_client = server->client,
4322 .rpc_message = &msg,
4323 .callback_ops = &nfs4_delegreturn_ops,
4324 .flags = RPC_TASK_ASYNC,
4325 };
4326 int status = 0;
4327
4328 data = kzalloc(sizeof(*data), GFP_NOFS);
4329 if (data == NULL)
4330 return -ENOMEM;
4331 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4332 data->args.fhandle = &data->fh;
4333 data->args.stateid = &data->stateid;
4334 data->args.bitmask = server->cache_consistency_bitmask;
4335 nfs_copy_fh(&data->fh, NFS_FH(inode));
4336 nfs4_stateid_copy(&data->stateid, stateid);
4337 data->res.fattr = &data->fattr;
4338 data->res.server = server;
4339 nfs_fattr_init(data->res.fattr);
4340 data->timestamp = jiffies;
4341 data->rpc_status = 0;
4342
4343 task_setup_data.callback_data = data;
4344 msg.rpc_argp = &data->args;
4345 msg.rpc_resp = &data->res;
4346 task = rpc_run_task(&task_setup_data);
4347 if (IS_ERR(task))
4348 return PTR_ERR(task);
4349 if (!issync)
4350 goto out;
4351 status = nfs4_wait_for_completion_rpc_task(task);
4352 if (status != 0)
4353 goto out;
4354 status = data->rpc_status;
4355 if (status == 0)
4356 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4357 else
4358 nfs_refresh_inode(inode, &data->fattr);
4359 out:
4360 rpc_put_task(task);
4361 return status;
4362 }
4363
4364 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4365 {
4366 struct nfs_server *server = NFS_SERVER(inode);
4367 struct nfs4_exception exception = { };
4368 int err;
4369 do {
4370 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4371 switch (err) {
4372 case -NFS4ERR_STALE_STATEID:
4373 case -NFS4ERR_EXPIRED:
4374 case 0:
4375 return 0;
4376 }
4377 err = nfs4_handle_exception(server, err, &exception);
4378 } while (exception.retry);
4379 return err;
4380 }
4381
4382 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4383 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4384
4385 /*
4386 * sleep, with exponential backoff, and retry the LOCK operation.
4387 */
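/* e.g. 1s, 2s, 4s, ... capped at NFS4_LOCK_MAXTIMEOUT (30s) when the caller
 * starts from NFS4_LOCK_MINTIMEOUT (1s). */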
4388 static unsigned long
4389 nfs4_set_lock_task_retry(unsigned long timeout)
4390 {
4391 freezable_schedule_timeout_killable(timeout);
4392 timeout <<= 1;
4393 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4394 return NFS4_LOCK_MAXTIMEOUT;
4395 return timeout;
4396 }
4397
4398 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4399 {
4400 struct inode *inode = state->inode;
4401 struct nfs_server *server = NFS_SERVER(inode);
4402 struct nfs_client *clp = server->nfs_client;
4403 struct nfs_lockt_args arg = {
4404 .fh = NFS_FH(inode),
4405 .fl = request,
4406 };
4407 struct nfs_lockt_res res = {
4408 .denied = request,
4409 };
4410 struct rpc_message msg = {
4411 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4412 .rpc_argp = &arg,
4413 .rpc_resp = &res,
4414 .rpc_cred = state->owner->so_cred,
4415 };
4416 struct nfs4_lock_state *lsp;
4417 int status;
4418
4419 arg.lock_owner.clientid = clp->cl_clientid;
4420 status = nfs4_set_lock_state(state, request);
4421 if (status != 0)
4422 goto out;
4423 lsp = request->fl_u.nfs4_fl.owner;
4424 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4425 arg.lock_owner.s_dev = server->s_dev;
4426 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4427 switch (status) {
4428 case 0:
4429 request->fl_type = F_UNLCK;
4430 break;
4431 case -NFS4ERR_DENIED:
4432 status = 0;
4433 }
4434 request->fl_ops->fl_release_private(request);
4435 out:
4436 return status;
4437 }
4438
4439 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4440 {
4441 struct nfs4_exception exception = { };
4442 int err;
4443
4444 do {
4445 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4446 _nfs4_proc_getlk(state, cmd, request),
4447 &exception);
4448 } while (exception.retry);
4449 return err;
4450 }
4451
4452 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4453 {
4454 int res = 0;
4455 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4456 case FL_POSIX:
4457 res = posix_lock_file_wait(file, fl);
4458 break;
4459 case FL_FLOCK:
4460 res = flock_lock_file_wait(file, fl);
4461 break;
4462 default:
4463 BUG();
4464 }
4465 return res;
4466 }
4467
4468 struct nfs4_unlockdata {
4469 struct nfs_locku_args arg;
4470 struct nfs_locku_res res;
4471 struct nfs4_lock_state *lsp;
4472 struct nfs_open_context *ctx;
4473 struct file_lock fl;
4474 const struct nfs_server *server;
4475 unsigned long timestamp;
4476 };
4477
4478 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4479 struct nfs_open_context *ctx,
4480 struct nfs4_lock_state *lsp,
4481 struct nfs_seqid *seqid)
4482 {
4483 struct nfs4_unlockdata *p;
4484 struct inode *inode = lsp->ls_state->inode;
4485
4486 p = kzalloc(sizeof(*p), GFP_NOFS);
4487 if (p == NULL)
4488 return NULL;
4489 p->arg.fh = NFS_FH(inode);
4490 p->arg.fl = &p->fl;
4491 p->arg.seqid = seqid;
4492 p->res.seqid = seqid;
4493 p->arg.stateid = &lsp->ls_stateid;
4494 p->lsp = lsp;
4495 atomic_inc(&lsp->ls_count);
4496 /* Ensure we don't close file until we're done freeing locks! */
4497 p->ctx = get_nfs_open_context(ctx);
4498 memcpy(&p->fl, fl, sizeof(p->fl));
4499 p->server = NFS_SERVER(inode);
4500 return p;
4501 }
4502
4503 static void nfs4_locku_release_calldata(void *data)
4504 {
4505 struct nfs4_unlockdata *calldata = data;
4506 nfs_free_seqid(calldata->arg.seqid);
4507 nfs4_put_lock_state(calldata->lsp);
4508 put_nfs_open_context(calldata->ctx);
4509 kfree(calldata);
4510 }
4511
4512 static void nfs4_locku_done(struct rpc_task *task, void *data)
4513 {
4514 struct nfs4_unlockdata *calldata = data;
4515
4516 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4517 return;
4518 switch (task->tk_status) {
4519 case 0:
4520 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4521 &calldata->res.stateid);
4522 renew_lease(calldata->server, calldata->timestamp);
4523 break;
4524 case -NFS4ERR_BAD_STATEID:
4525 case -NFS4ERR_OLD_STATEID:
4526 case -NFS4ERR_STALE_STATEID:
4527 case -NFS4ERR_EXPIRED:
4528 break;
4529 default:
4530 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4531 rpc_restart_call_prepare(task);
4532 }
4533 nfs_release_seqid(calldata->arg.seqid);
4534 }
4535
4536 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4537 {
4538 struct nfs4_unlockdata *calldata = data;
4539
4540 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4541 return;
4542 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
4543 /* Note: exit _without_ running nfs4_locku_done */
4544 task->tk_action = NULL;
4545 return;
4546 }
4547 calldata->timestamp = jiffies;
4548 if (nfs4_setup_sequence(calldata->server,
4549 &calldata->arg.seq_args,
4550 &calldata->res.seq_res,
4551 task) != 0)
4552 nfs_release_seqid(calldata->arg.seqid);
4553 else
4554 rpc_call_start(task);
4555 }
4556
4557 static const struct rpc_call_ops nfs4_locku_ops = {
4558 .rpc_call_prepare = nfs4_locku_prepare,
4559 .rpc_call_done = nfs4_locku_done,
4560 .rpc_release = nfs4_locku_release_calldata,
4561 };
4562
4563 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4564 struct nfs_open_context *ctx,
4565 struct nfs4_lock_state *lsp,
4566 struct nfs_seqid *seqid)
4567 {
4568 struct nfs4_unlockdata *data;
4569 struct rpc_message msg = {
4570 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4571 .rpc_cred = ctx->cred,
4572 };
4573 struct rpc_task_setup task_setup_data = {
4574 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4575 .rpc_message = &msg,
4576 .callback_ops = &nfs4_locku_ops,
4577 .workqueue = nfsiod_workqueue,
4578 .flags = RPC_TASK_ASYNC,
4579 };
4580
4581 /* Ensure this is an unlock - when canceling a lock, the
4582 * canceled lock is passed in, and it won't be an unlock.
4583 */
4584 fl->fl_type = F_UNLCK;
4585
4586 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4587 if (data == NULL) {
4588 nfs_free_seqid(seqid);
4589 return ERR_PTR(-ENOMEM);
4590 }
4591
4592 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4593 msg.rpc_argp = &data->arg;
4594 msg.rpc_resp = &data->res;
4595 task_setup_data.callback_data = data;
4596 return rpc_run_task(&task_setup_data);
4597 }
4598
4599 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4600 {
4601 struct nfs_inode *nfsi = NFS_I(state->inode);
4602 struct nfs_seqid *seqid;
4603 struct nfs4_lock_state *lsp;
4604 struct rpc_task *task;
4605 int status = 0;
4606 unsigned char fl_flags = request->fl_flags;
4607
4608 status = nfs4_set_lock_state(state, request);
4609 /* Unlock _before_ we do the RPC call */
4610 request->fl_flags |= FL_EXISTS;
4611 down_read(&nfsi->rwsem);
4612 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4613 up_read(&nfsi->rwsem);
4614 goto out;
4615 }
4616 up_read(&nfsi->rwsem);
4617 if (status != 0)
4618 goto out;
4619 /* Is this a delegated lock? */
4620 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4621 goto out;
4622 lsp = request->fl_u.nfs4_fl.owner;
4623 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4624 status = -ENOMEM;
4625 if (seqid == NULL)
4626 goto out;
4627 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4628 status = PTR_ERR(task);
4629 if (IS_ERR(task))
4630 goto out;
4631 status = nfs4_wait_for_completion_rpc_task(task);
4632 rpc_put_task(task);
4633 out:
4634 request->fl_flags = fl_flags;
4635 return status;
4636 }
4637
4638 struct nfs4_lockdata {
4639 struct nfs_lock_args arg;
4640 struct nfs_lock_res res;
4641 struct nfs4_lock_state *lsp;
4642 struct nfs_open_context *ctx;
4643 struct file_lock fl;
4644 unsigned long timestamp;
4645 int rpc_status;
4646 int cancelled;
4647 struct nfs_server *server;
4648 };
4649
4650 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4651 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4652 gfp_t gfp_mask)
4653 {
4654 struct nfs4_lockdata *p;
4655 struct inode *inode = lsp->ls_state->inode;
4656 struct nfs_server *server = NFS_SERVER(inode);
4657
4658 p = kzalloc(sizeof(*p), gfp_mask);
4659 if (p == NULL)
4660 return NULL;
4661
4662 p->arg.fh = NFS_FH(inode);
4663 p->arg.fl = &p->fl;
4664 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4665 if (p->arg.open_seqid == NULL)
4666 goto out_free;
4667 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4668 if (p->arg.lock_seqid == NULL)
4669 goto out_free_seqid;
4670 p->arg.lock_stateid = &lsp->ls_stateid;
4671 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4672 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4673 p->arg.lock_owner.s_dev = server->s_dev;
4674 p->res.lock_seqid = p->arg.lock_seqid;
4675 p->lsp = lsp;
4676 p->server = server;
4677 atomic_inc(&lsp->ls_count);
4678 p->ctx = get_nfs_open_context(ctx);
4679 memcpy(&p->fl, fl, sizeof(p->fl));
4680 return p;
4681 out_free_seqid:
4682 nfs_free_seqid(p->arg.open_seqid);
4683 out_free:
4684 kfree(p);
4685 return NULL;
4686 }
4687
4688 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4689 {
4690 struct nfs4_lockdata *data = calldata;
4691 struct nfs4_state *state = data->lsp->ls_state;
4692
4693 dprintk("%s: begin!\n", __func__);
4694 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4695 return;
4696 /* Do we need to do an open_to_lock_owner? */
4697 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4698 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4699 goto out_release_lock_seqid;
4700 data->arg.open_stateid = &state->stateid;
4701 data->arg.new_lock_owner = 1;
4702 data->res.open_seqid = data->arg.open_seqid;
4703 } else
4704 data->arg.new_lock_owner = 0;
4705 data->timestamp = jiffies;
4706 if (nfs4_setup_sequence(data->server,
4707 &data->arg.seq_args,
4708 &data->res.seq_res,
4709 task) == 0) {
4710 rpc_call_start(task);
4711 return;
4712 }
4713 nfs_release_seqid(data->arg.open_seqid);
4714 out_release_lock_seqid:
4715 nfs_release_seqid(data->arg.lock_seqid);
4716 dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
4717 }
4718
4719 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4720 {
4721 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4722 nfs4_lock_prepare(task, calldata);
4723 }
4724
4725 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4726 {
4727 struct nfs4_lockdata *data = calldata;
4728
4729 dprintk("%s: begin!\n", __func__);
4730
4731 if (!nfs4_sequence_done(task, &data->res.seq_res))
4732 return;
4733
4734 data->rpc_status = task->tk_status;
4735 if (data->arg.new_lock_owner != 0) {
4736 if (data->rpc_status == 0)
4737 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4738 else
4739 goto out;
4740 }
4741 if (data->rpc_status == 0) {
4742 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4743 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
4744 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4745 }
4746 out:
4747 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4748 }
4749
4750 static void nfs4_lock_release(void *calldata)
4751 {
4752 struct nfs4_lockdata *data = calldata;
4753
4754 dprintk("%s: begin!\n", __func__);
4755 nfs_free_seqid(data->arg.open_seqid);
4756 if (data->cancelled != 0) {
4757 struct rpc_task *task;
4758 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4759 data->arg.lock_seqid);
4760 if (!IS_ERR(task))
4761 rpc_put_task_async(task);
4762 dprintk("%s: cancelling lock!\n", __func__);
4763 } else
4764 nfs_free_seqid(data->arg.lock_seqid);
4765 nfs4_put_lock_state(data->lsp);
4766 put_nfs_open_context(data->ctx);
4767 kfree(data);
4768 dprintk("%s: done!\n", __func__);
4769 }
4770
4771 static const struct rpc_call_ops nfs4_lock_ops = {
4772 .rpc_call_prepare = nfs4_lock_prepare,
4773 .rpc_call_done = nfs4_lock_done,
4774 .rpc_release = nfs4_lock_release,
4775 };
4776
4777 static const struct rpc_call_ops nfs4_recover_lock_ops = {
4778 .rpc_call_prepare = nfs4_recover_lock_prepare,
4779 .rpc_call_done = nfs4_lock_done,
4780 .rpc_release = nfs4_lock_release,
4781 };
4782
4783 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4784 {
4785 switch (error) {
4786 case -NFS4ERR_ADMIN_REVOKED:
4787 case -NFS4ERR_BAD_STATEID:
4788 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4789 if (new_lock_owner != 0 ||
4790 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
4791 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4792 break;
4793 case -NFS4ERR_STALE_STATEID:
4794 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
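/* fall through */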
4795 case -NFS4ERR_EXPIRED:
4796 nfs4_schedule_lease_recovery(server->nfs_client);
4797 }
4798 }
4799
4800 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4801 {
4802 struct nfs4_lockdata *data;
4803 struct rpc_task *task;
4804 struct rpc_message msg = {
4805 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4806 .rpc_cred = state->owner->so_cred,
4807 };
4808 struct rpc_task_setup task_setup_data = {
4809 .rpc_client = NFS_CLIENT(state->inode),
4810 .rpc_message = &msg,
4811 .callback_ops = &nfs4_lock_ops,
4812 .workqueue = nfsiod_workqueue,
4813 .flags = RPC_TASK_ASYNC,
4814 };
4815 int ret;
4816
4817 dprintk("%s: begin!\n", __func__);
4818 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4819 fl->fl_u.nfs4_fl.owner,
4820 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4821 if (data == NULL)
4822 return -ENOMEM;
4823 if (IS_SETLKW(cmd))
4824 data->arg.block = 1;
4825 if (recovery_type > NFS_LOCK_NEW) {
4826 if (recovery_type == NFS_LOCK_RECLAIM)
4827 data->arg.reclaim = NFS_LOCK_RECLAIM;
4828 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4829 }
4830 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4831 msg.rpc_argp = &data->arg;
4832 msg.rpc_resp = &data->res;
4833 task_setup_data.callback_data = data;
4834 task = rpc_run_task(&task_setup_data);
4835 if (IS_ERR(task))
4836 return PTR_ERR(task);
4837 ret = nfs4_wait_for_completion_rpc_task(task);
4838 if (ret == 0) {
4839 ret = data->rpc_status;
4840 if (ret)
4841 nfs4_handle_setlk_error(data->server, data->lsp,
4842 data->arg.new_lock_owner, ret);
4843 } else
4844 data->cancelled = 1;
4845 rpc_put_task(task);
4846 dprintk("%s: done, ret = %d!\n", __func__, ret);
4847 return ret;
4848 }
4849
4850 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4851 {
4852 struct nfs_server *server = NFS_SERVER(state->inode);
4853 struct nfs4_exception exception = {
4854 .inode = state->inode,
4855 };
4856 int err;
4857
4858 do {
4859 /* Cache the lock if possible... */
4860 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4861 return 0;
4862 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4863 if (err != -NFS4ERR_DELAY)
4864 break;
4865 nfs4_handle_exception(server, err, &exception);
4866 } while (exception.retry);
4867 return err;
4868 }
4869
4870 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4871 {
4872 struct nfs_server *server = NFS_SERVER(state->inode);
4873 struct nfs4_exception exception = {
4874 .inode = state->inode,
4875 };
4876 int err;
4877
4878 err = nfs4_set_lock_state(state, request);
4879 if (err != 0)
4880 return err;
4881 do {
4882 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4883 return 0;
4884 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4885 switch (err) {
4886 default:
4887 goto out;
4888 case -NFS4ERR_GRACE:
4889 case -NFS4ERR_DELAY:
4890 nfs4_handle_exception(server, err, &exception);
4891 err = 0;
4892 }
4893 } while (exception.retry);
4894 out:
4895 return err;
4896 }
4897
4898 #if defined(CONFIG_NFS_V4_1)
4899 /**
4900 * nfs41_check_expired_locks - possibly free a lock stateid
4901 *
4902 * @state: NFSv4 state for an inode
4903 *
4904 * Returns NFS_OK if recovery for this stateid is now finished.
4905 * Otherwise a negative NFS4ERR value is returned.
4906 */
4907 static int nfs41_check_expired_locks(struct nfs4_state *state)
4908 {
4909 int status, ret = -NFS4ERR_BAD_STATEID;
4910 struct nfs4_lock_state *lsp;
4911 struct nfs_server *server = NFS_SERVER(state->inode);
4912
4913 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4914 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
4915 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4916 if (status != NFS_OK) {
4917 /* Free the stateid unless the server
4918 * informs us the stateid is unrecognized. */
4919 if (status != -NFS4ERR_BAD_STATEID)
4920 nfs41_free_stateid(server,
4921 &lsp->ls_stateid);
4922 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
4923 ret = status;
4924 }
4925 }
4926 }
4927
4928 return ret;
4929 }
4930
4931 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4932 {
4933 int status = NFS_OK;
4934
4935 if (test_bit(LK_STATE_IN_USE, &state->flags))
4936 status = nfs41_check_expired_locks(state);
4937 if (status != NFS_OK)
4938 status = nfs4_lock_expired(state, request);
4939 return status;
4940 }
4941 #endif
4942
4943 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4944 {
4945 struct nfs_inode *nfsi = NFS_I(state->inode);
4946 unsigned char fl_flags = request->fl_flags;
4947 int status = -ENOLCK;
4948
4949 if ((fl_flags & FL_POSIX) &&
4950 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4951 goto out;
4952 /* Is this a delegated open? */
4953 status = nfs4_set_lock_state(state, request);
4954 if (status != 0)
4955 goto out;
4956 request->fl_flags |= FL_ACCESS;
4957 status = do_vfs_lock(request->fl_file, request);
4958 if (status < 0)
4959 goto out;
4960 down_read(&nfsi->rwsem);
4961 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4962 /* Yes: cache locks! */
4963 /* ...but avoid races with delegation recall... */
4964 request->fl_flags = fl_flags & ~FL_SLEEP;
4965 status = do_vfs_lock(request->fl_file, request);
4966 goto out_unlock;
4967 }
4968 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4969 if (status != 0)
4970 goto out_unlock;
4971 /* Note: we always want to sleep here! */
4972 request->fl_flags = fl_flags | FL_SLEEP;
4973 if (do_vfs_lock(request->fl_file, request) < 0)
4974 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4975 "manager!\n", __func__);
4976 out_unlock:
4977 up_read(&nfsi->rwsem);
4978 out:
4979 request->fl_flags = fl_flags;
4980 return status;
4981 }
4982
4983 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4984 {
4985 struct nfs4_exception exception = {
4986 .state = state,
4987 .inode = state->inode,
4988 };
4989 int err;
4990
4991 do {
4992 err = _nfs4_proc_setlk(state, cmd, request);
4993 if (err == -NFS4ERR_DENIED)
4994 err = -EAGAIN;
4995 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4996 err, &exception);
4997 } while (exception.retry);
4998 return err;
4999 }
5000
5001 static int
5002 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
5003 {
5004 struct nfs_open_context *ctx;
5005 struct nfs4_state *state;
5006 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
5007 int status;
5008
5009 /* verify open state */
5010 ctx = nfs_file_open_context(filp);
5011 state = ctx->state;
5012
5013 if (request->fl_start < 0 || request->fl_end < 0)
5014 return -EINVAL;
5015
5016 if (IS_GETLK(cmd)) {
5017 if (state != NULL)
5018 return nfs4_proc_getlk(state, F_GETLK, request);
5019 return 0;
5020 }
5021
5022 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
5023 return -EINVAL;
5024
5025 if (request->fl_type == F_UNLCK) {
5026 if (state != NULL)
5027 return nfs4_proc_unlck(state, cmd, request);
5028 return 0;
5029 }
5030
5031 if (state == NULL)
5032 return -ENOLCK;
5033 /*
5034 * Don't rely on the VFS having checked the file open mode,
5035 * since it won't do this for flock() locks.
5036 */
5037 switch (request->fl_type) {
5038 case F_RDLCK:
5039 if (!(filp->f_mode & FMODE_READ))
5040 return -EBADF;
5041 break;
5042 case F_WRLCK:
5043 if (!(filp->f_mode & FMODE_WRITE))
5044 return -EBADF;
5045 }
5046
5047 do {
5048 status = nfs4_proc_setlk(state, cmd, request);
5049 if ((status != -EAGAIN) || IS_SETLK(cmd))
5050 break;
5051 timeout = nfs4_set_lock_task_retry(timeout);
5052 status = -ERESTARTSYS;
5053 if (signalled())
5054 break;
5055 } while (status < 0);
5056 return status;
5057 }
5058
5059 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
5060 {
5061 struct nfs_server *server = NFS_SERVER(state->inode);
5062 struct nfs4_exception exception = { };
5063 int err;
5064
5065 err = nfs4_set_lock_state(state, fl);
5066 if (err != 0)
5067 goto out;
5068 do {
5069 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
5070 switch (err) {
5071 default:
5072 printk(KERN_ERR "NFS: %s: unhandled error "
5073 "%d.\n", __func__, err);
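/* fall through */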
5074 case 0:
5075 case -ESTALE:
5076 goto out;
5077 case -NFS4ERR_EXPIRED:
5078 nfs4_schedule_stateid_recovery(server, state);
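/* fall through */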
5079 case -NFS4ERR_STALE_CLIENTID:
5080 case -NFS4ERR_STALE_STATEID:
5081 nfs4_schedule_lease_recovery(server->nfs_client);
5082 goto out;
5083 case -NFS4ERR_BADSESSION:
5084 case -NFS4ERR_BADSLOT:
5085 case -NFS4ERR_BAD_HIGH_SLOT:
5086 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
5087 case -NFS4ERR_DEADSESSION:
5088 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
5089 goto out;
5090 case -ERESTARTSYS:
5091 /*
5092 * The show must go on: exit, but mark the
5093 * stateid as needing recovery.
5094 */
5095 case -NFS4ERR_DELEG_REVOKED:
5096 case -NFS4ERR_ADMIN_REVOKED:
5097 case -NFS4ERR_BAD_STATEID:
5098 case -NFS4ERR_OPENMODE:
5099 nfs4_schedule_stateid_recovery(server, state);
5100 err = 0;
5101 goto out;
5102 case -EKEYEXPIRED:
5103 /*
5104 * User RPCSEC_GSS context has expired.
5105 * We cannot recover this stateid now, so
5106 * skip it and allow the recovery thread to
5107 * proceed.
5108 */
5109 err = 0;
5110 goto out;
5111 case -ENOMEM:
5112 case -NFS4ERR_DENIED:
5113 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
5114 err = 0;
5115 goto out;
5116 case -NFS4ERR_DELAY:
5117 break;
5118 }
5119 err = nfs4_handle_exception(server, err, &exception);
5120 } while (exception.retry);
5121 out:
5122 return err;
5123 }
5124
5125 struct nfs_release_lockowner_data {
5126 struct nfs4_lock_state *lsp;
5127 struct nfs_server *server;
5128 struct nfs_release_lockowner_args args;
5129 };
5130
5131 static void nfs4_release_lockowner_release(void *calldata)
5132 {
5133 struct nfs_release_lockowner_data *data = calldata;
5134 nfs4_free_lock_state(data->server, data->lsp);
5135 kfree(calldata);
5136 }
5137
5138 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
5139 .rpc_release = nfs4_release_lockowner_release,
5140 };
5141
5142 int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
5143 {
5144 struct nfs_server *server = lsp->ls_state->owner->so_server;
5145 struct nfs_release_lockowner_data *data;
5146 struct rpc_message msg = {
5147 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
5148 };
5149
5150 if (server->nfs_client->cl_mvops->minor_version != 0)
5151 return -EINVAL;
5152 data = kmalloc(sizeof(*data), GFP_NOFS);
5153 if (!data)
5154 return -ENOMEM;
5155 data->lsp = lsp;
5156 data->server = server;
5157 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
5158 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
5159 data->args.lock_owner.s_dev = server->s_dev;
5160 msg.rpc_argp = &data->args;
5161 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
5162 return 0;
5163 }
5164
5165 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
5166
5167 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
5168 const void *buf, size_t buflen,
5169 int flags, int type)
5170 {
5171 if (strcmp(key, "") != 0)
5172 return -EINVAL;
5173
5174 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
5175 }
5176
5177 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
5178 void *buf, size_t buflen, int type)
5179 {
5180 if (strcmp(key, "") != 0)
5181 return -EINVAL;
5182
5183 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
5184 }
5185
5186 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
5187 size_t list_len, const char *name,
5188 size_t name_len, int type)
5189 {
5190 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
5191
5192 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
5193 return 0;
5194
5195 if (list && len <= list_len)
5196 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
5197 return len;
5198 }
5199
5200 /*
5201 * nfs_fhget will use either the mounted_on_fileid or the fileid
5202 */
5203 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
5204 {
5205 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
5206 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
5207 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
5208 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
5209 return;
5210
5211 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
5212 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
5213 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
5214 fattr->nlink = 2;
5215 }
5216
5217 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5218 const struct qstr *name,
5219 struct nfs4_fs_locations *fs_locations,
5220 struct page *page)
5221 {
5222 struct nfs_server *server = NFS_SERVER(dir);
5223 u32 bitmask[2] = {
5224 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
5225 };
5226 struct nfs4_fs_locations_arg args = {
5227 .dir_fh = NFS_FH(dir),
5228 .name = name,
5229 .page = page,
5230 .bitmask = bitmask,
5231 };
5232 struct nfs4_fs_locations_res res = {
5233 .fs_locations = fs_locations,
5234 };
5235 struct rpc_message msg = {
5236 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5237 .rpc_argp = &args,
5238 .rpc_resp = &res,
5239 };
5240 int status;
5241
5242 dprintk("%s: start\n", __func__);
5243
5244 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5245 * is not supported */
5246 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5247 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5248 else
5249 bitmask[0] |= FATTR4_WORD0_FILEID;
5250
5251 nfs_fattr_init(&fs_locations->fattr);
5252 fs_locations->server = server;
5253 fs_locations->nlocations = 0;
5254 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5255 dprintk("%s: returned status = %d\n", __func__, status);
5256 return status;
5257 }
5258
5259 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5260 const struct qstr *name,
5261 struct nfs4_fs_locations *fs_locations,
5262 struct page *page)
5263 {
5264 struct nfs4_exception exception = { };
5265 int err;
5266 do {
5267 err = nfs4_handle_exception(NFS_SERVER(dir),
5268 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5269 &exception);
5270 } while (exception.retry);
5271 return err;
5272 }
5273
5274 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5275 {
5276 int status;
5277 struct nfs4_secinfo_arg args = {
5278 .dir_fh = NFS_FH(dir),
5279 .name = name,
5280 };
5281 struct nfs4_secinfo_res res = {
5282 .flavors = flavors,
5283 };
5284 struct rpc_message msg = {
5285 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5286 .rpc_argp = &args,
5287 .rpc_resp = &res,
5288 };
5289
5290 dprintk("NFS call secinfo %s\n", name->name);
5291 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5292 dprintk("NFS reply secinfo: %d\n", status);
5293 return status;
5294 }
5295
5296 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5297 struct nfs4_secinfo_flavors *flavors)
5298 {
5299 struct nfs4_exception exception = { };
5300 int err;
5301 do {
5302 err = nfs4_handle_exception(NFS_SERVER(dir),
5303 _nfs4_proc_secinfo(dir, name, flavors),
5304 &exception);
5305 } while (exception.retry);
5306 return err;
5307 }
5308
5309 #ifdef CONFIG_NFS_V4_1
5310 /*
5311 * Check the exchange flags returned by the server for invalid flags: it is
5312 * an error to have both the PNFS and NON_PNFS flags set, or to have none of
5313 * the NON_PNFS, PNFS, or DS flags set.
5314 */
5315 static int nfs4_check_cl_exchange_flags(u32 flags)
5316 {
5317 if (flags & ~EXCHGID4_FLAG_MASK_R)
5318 goto out_inval;
5319 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5320 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5321 goto out_inval;
5322 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5323 goto out_inval;
5324 return NFS_OK;
5325 out_inval:
5326 return -NFS4ERR_INVAL;
5327 }
5328
5329 static bool
5330 nfs41_same_server_scope(struct nfs41_server_scope *a,
5331 struct nfs41_server_scope *b)
5332 {
5333 if (a->server_scope_sz == b->server_scope_sz &&
5334 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5335 return true;
5336
5337 return false;
5338 }
5339
5340 /*
5341 * nfs4_proc_bind_conn_to_session()
5342 *
5343 * The 4.1 client currently uses the same TCP connection for the
5344 * fore and backchannel.
5345 */
5346 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5347 {
5348 int status;
5349 struct nfs41_bind_conn_to_session_res res;
5350 struct rpc_message msg = {
5351 .rpc_proc =
5352 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5353 .rpc_argp = clp,
5354 .rpc_resp = &res,
5355 .rpc_cred = cred,
5356 };
5357
5358 dprintk("--> %s\n", __func__);
5359
5360 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5361 if (unlikely(res.session == NULL)) {
5362 status = -ENOMEM;
5363 goto out;
5364 }
5365
5366 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5367 if (status == 0) {
5368 if (memcmp(res.session->sess_id.data,
5369 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5370 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5371 status = -EIO;
5372 goto out_session;
5373 }
5374 if (res.dir != NFS4_CDFS4_BOTH) {
5375 dprintk("NFS: %s: Unexpected direction from server\n",
5376 __func__);
5377 status = -EIO;
5378 goto out_session;
5379 }
5380 if (res.use_conn_in_rdma_mode) {
5381 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5382 __func__);
5383 status = -EIO;
5384 goto out_session;
5385 }
5386 }
5387 out_session:
5388 kfree(res.session);
5389 out:
5390 dprintk("<-- %s status= %d\n", __func__, status);
5391 return status;
5392 }
5393
5394 /*
5395 * nfs4_proc_exchange_id()
5396 *
5397 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5398 *
5399 * Since the clientid has expired, all compounds using sessions
5400 * associated with the stale clientid will be returning
5401 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5402 * be in some phase of session reset.
5403 */
5404 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5405 {
5406 nfs4_verifier verifier;
5407 struct nfs41_exchange_id_args args = {
5408 .verifier = &verifier,
5409 .client = clp,
5410 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5411 };
5412 struct nfs41_exchange_id_res res = {
5413 0
5414 };
5415 int status;
5416 struct rpc_message msg = {
5417 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5418 .rpc_argp = &args,
5419 .rpc_resp = &res,
5420 .rpc_cred = cred,
5421 };
5422
5423 nfs4_init_boot_verifier(clp, &verifier);
5424 args.id_len = nfs4_init_uniform_client_string(clp, args.id,
5425 sizeof(args.id));
5426 dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
5427 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5428 args.id_len, args.id);
5429
5430 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5431 GFP_NOFS);
5432 if (unlikely(res.server_owner == NULL)) {
5433 status = -ENOMEM;
5434 goto out;
5435 }
5436
5437 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5438 GFP_NOFS);
5439 if (unlikely(res.server_scope == NULL)) {
5440 status = -ENOMEM;
5441 goto out_server_owner;
5442 }
5443
5444 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5445 if (unlikely(res.impl_id == NULL)) {
5446 status = -ENOMEM;
5447 goto out_server_scope;
5448 }
5449
5450 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5451 if (status == 0)
5452 status = nfs4_check_cl_exchange_flags(res.flags);
5453
5454 if (status == 0) {
5455 clp->cl_clientid = res.clientid;
5456 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5457 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5458 clp->cl_seqid = res.seqid;
5459
5460 kfree(clp->cl_serverowner);
5461 clp->cl_serverowner = res.server_owner;
5462 res.server_owner = NULL;
5463
5464 /* use the most recent implementation id */
5465 kfree(clp->cl_implid);
5466 clp->cl_implid = res.impl_id;
5467
5468 if (clp->cl_serverscope != NULL &&
5469 !nfs41_same_server_scope(clp->cl_serverscope,
5470 res.server_scope)) {
5471 dprintk("%s: server_scope mismatch detected\n",
5472 __func__);
5473 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5474 kfree(clp->cl_serverscope);
5475 clp->cl_serverscope = NULL;
5476 }
5477
5478 if (clp->cl_serverscope == NULL) {
5479 clp->cl_serverscope = res.server_scope;
5480 goto out;
5481 }
5482 } else
5483 kfree(res.impl_id);
5484
5485 out_server_owner:
5486 kfree(res.server_owner);
5487 out_server_scope:
5488 kfree(res.server_scope);
5489 out:
5490 if (clp->cl_implid != NULL)
5491 dprintk("NFS reply exchange_id: Server Implementation ID: "
5492 "domain: %s, name: %s, date: %llu,%u\n",
5493 clp->cl_implid->domain, clp->cl_implid->name,
5494 clp->cl_implid->date.seconds,
5495 clp->cl_implid->date.nseconds);
5496 dprintk("NFS reply exchange_id: %d\n", status);
5497 return status;
5498 }
5499
5500 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5501 struct rpc_cred *cred)
5502 {
5503 struct rpc_message msg = {
5504 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5505 .rpc_argp = clp,
5506 .rpc_cred = cred,
5507 };
5508 int status;
5509
5510 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5511 if (status)
5512 dprintk("NFS: Got error %d from the server %s on "
5513 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5514 return status;
5515 }
5516
5517 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5518 struct rpc_cred *cred)
5519 {
5520 unsigned int loop;
5521 int ret;
5522
5523 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5524 ret = _nfs4_proc_destroy_clientid(clp, cred);
5525 switch (ret) {
5526 case -NFS4ERR_DELAY:
5527 case -NFS4ERR_CLIENTID_BUSY:
5528 ssleep(1);
5529 break;
5530 default:
5531 return ret;
5532 }
5533 }
5534 return 0;
5535 }
5536
5537 int nfs4_destroy_clientid(struct nfs_client *clp)
5538 {
5539 struct rpc_cred *cred;
5540 int ret = 0;
5541
5542 if (clp->cl_mvops->minor_version < 1)
5543 goto out;
5544 if (clp->cl_exchange_flags == 0)
5545 goto out;
5546 if (clp->cl_preserve_clid)
5547 goto out;
5548 cred = nfs4_get_exchange_id_cred(clp);
5549 ret = nfs4_proc_destroy_clientid(clp, cred);
5550 if (cred)
5551 put_rpccred(cred);
5552 switch (ret) {
5553 case 0:
5554 case -NFS4ERR_STALE_CLIENTID:
5555 clp->cl_exchange_flags = 0;
5556 }
5557 out:
5558 return ret;
5559 }
5560
5561 struct nfs4_get_lease_time_data {
5562 struct nfs4_get_lease_time_args *args;
5563 struct nfs4_get_lease_time_res *res;
5564 struct nfs_client *clp;
5565 };
5566
5567 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5568 void *calldata)
5569 {
5570 int ret;
5571 struct nfs4_get_lease_time_data *data =
5572 (struct nfs4_get_lease_time_data *)calldata;
5573
5574 dprintk("--> %s\n", __func__);
5575 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5576 /* just set up the sequence; do not trigger session recovery
5577 since we're invoked from within one */
5578 ret = nfs41_setup_sequence(data->clp->cl_session,
5579 &data->args->la_seq_args,
5580 &data->res->lr_seq_res, task);
5581
5582 if (ret != -EAGAIN)
5583 rpc_call_start(task);
5584 dprintk("<-- %s\n", __func__);
5585 }
5586
5587 /*
5588 * Called from nfs4_state_manager thread for session setup, so don't recover
5589 * from sequence operation or clientid errors.
5590 */
5591 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5592 {
5593 struct nfs4_get_lease_time_data *data =
5594 (struct nfs4_get_lease_time_data *)calldata;
5595
5596 dprintk("--> %s\n", __func__);
5597 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5598 return;
5599 switch (task->tk_status) {
5600 case -NFS4ERR_DELAY:
5601 case -NFS4ERR_GRACE:
5602 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5603 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5604 task->tk_status = 0;
5605 /* fall through */
5606 case -NFS4ERR_RETRY_UNCACHED_REP:
5607 rpc_restart_call_prepare(task);
5608 return;
5609 }
5610 dprintk("<-- %s\n", __func__);
5611 }
5612
5613 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5614 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5615 .rpc_call_done = nfs4_get_lease_time_done,
5616 };
5617
5618 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5619 {
5620 struct rpc_task *task;
5621 struct nfs4_get_lease_time_args args;
5622 struct nfs4_get_lease_time_res res = {
5623 .lr_fsinfo = fsinfo,
5624 };
5625 struct nfs4_get_lease_time_data data = {
5626 .args = &args,
5627 .res = &res,
5628 .clp = clp,
5629 };
5630 struct rpc_message msg = {
5631 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5632 .rpc_argp = &args,
5633 .rpc_resp = &res,
5634 };
5635 struct rpc_task_setup task_setup = {
5636 .rpc_client = clp->cl_rpcclient,
5637 .rpc_message = &msg,
5638 .callback_ops = &nfs4_get_lease_time_ops,
5639 .callback_data = &data,
5640 .flags = RPC_TASK_TIMEOUT,
5641 };
5642 int status;
5643
5644 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5645 dprintk("--> %s\n", __func__);
5646 task = rpc_run_task(&task_setup);
5647
5648 if (IS_ERR(task))
5649 status = PTR_ERR(task);
5650 else {
5651 status = task->tk_status;
5652 rpc_put_task(task);
5653 }
5654 dprintk("<-- %s return %d\n", __func__, status);
5655
5656 return status;
5657 }
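/*
 * GET_LEASE_TIME runs as a privileged, sessions-aware task so that it
 * can be issued while the state manager is still establishing the
 * session.  A hypothetical caller sketch (illustrative only, not
 * compiled): pass in an nfs_fsinfo and read back the lease time.
 */
#if 0
static void example_probe_lease_time(struct nfs_client *clp)
{
	struct nfs_fsinfo fsinfo;

	if (nfs4_proc_get_lease_time(clp, &fsinfo) == 0)
		dprintk("NFS: lease time is %u seconds\n", fsinfo.lease_time);
}
#endif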
5658
5659 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5660 {
5661 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5662 }
5663
5664 static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5665 struct nfs4_slot *new,
5666 u32 max_slots,
5667 u32 ivalue)
5668 {
5669 struct nfs4_slot *old = NULL;
5670 u32 i;
5671
5672 spin_lock(&tbl->slot_tbl_lock);
5673 if (new) {
5674 old = tbl->slots;
5675 tbl->slots = new;
5676 tbl->max_slots = max_slots;
5677 }
5678 tbl->highest_used_slotid = NFS4_NO_SLOT;
5679 for (i = 0; i < tbl->max_slots; i++)
5680 tbl->slots[i].seq_nr = ivalue;
5681 spin_unlock(&tbl->slot_tbl_lock);
5682 kfree(old);
5683 }
5684
5685 /*
5686 * (re)Initialise a slot table
5687 */
5688 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5689 u32 ivalue)
5690 {
5691 struct nfs4_slot *new = NULL;
5692 int ret = -ENOMEM;
5693
5694 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5695 max_reqs, tbl->max_slots);
5696
5697 /* Does the newly negotiated max_reqs match the existing slot table? */
5698 if (max_reqs != tbl->max_slots) {
5699 new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5700 if (!new)
5701 goto out;
5702 }
5703 ret = 0;
5704
5705 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5706 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5707 tbl, tbl->slots, tbl->max_slots);
5708 out:
5709 dprintk("<-- %s: return %d\n", __func__, ret);
5710 return ret;
5711 }
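/*
 * Note that nfs4_realloc_slot_table() only allocates a new slot array
 * when the negotiated max_reqs differs from the current table size;
 * otherwise it simply resets every slot's seq_nr to ivalue via
 * nfs4_add_and_init_slots(), which also resets highest_used_slotid to
 * NFS4_NO_SLOT.
 */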
5712
5713 /* Destroy the slot table */
5714 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5715 {
5716 if (session->fc_slot_table.slots != NULL) {
5717 kfree(session->fc_slot_table.slots);
5718 session->fc_slot_table.slots = NULL;
5719 }
5720 if (session->bc_slot_table.slots != NULL) {
5721 kfree(session->bc_slot_table.slots);
5722 session->bc_slot_table.slots = NULL;
5723 }
5724 return;
5725 }
5726
5727 /*
5728 * Initialize or reset the forechannel and backchannel tables
5729 */
5730 static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5731 {
5732 struct nfs4_slot_table *tbl;
5733 int status;
5734
5735 dprintk("--> %s\n", __func__);
5736 /* Fore channel */
5737 tbl = &ses->fc_slot_table;
5738 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5739 if (status) /* -ENOMEM */
5740 return status;
5741 /* Back channel */
5742 tbl = &ses->bc_slot_table;
5743 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5744 if (status && tbl->slots == NULL)
5745 /* Fore and back channels share a connection, so get
5746 * both slot tables or neither */
5747 nfs4_destroy_slot_tables(ses);
5748 return status;
5749 }
5750
5751 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5752 {
5753 struct nfs4_session *session;
5754 struct nfs4_slot_table *tbl;
5755
5756 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5757 if (!session)
5758 return NULL;
5759
5760 tbl = &session->fc_slot_table;
5761 tbl->highest_used_slotid = NFS4_NO_SLOT;
5762 spin_lock_init(&tbl->slot_tbl_lock);
5763 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5764 init_completion(&tbl->complete);
5765
5766 tbl = &session->bc_slot_table;
5767 tbl->highest_used_slotid = NFS4_NO_SLOT;
5768 spin_lock_init(&tbl->slot_tbl_lock);
5769 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5770 init_completion(&tbl->complete);
5771
5772 session->session_state = 1<<NFS4_SESSION_INITING;
5773
5774 session->clp = clp;
5775 return session;
5776 }
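/*
 * A freshly allocated session has empty fore and back channel slot
 * tables (highest_used_slotid == NFS4_NO_SLOT) and starts with the
 * NFS4_SESSION_INITING flag set; nfs4_init_session() below clears that
 * flag and seeds the fore channel request/response sizes from the
 * mount's rsize/wsize.  The fore channel uses a priority wait queue so
 * higher-priority (e.g. privileged) tasks are woken first.
 */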
5777
5778 void nfs4_destroy_session(struct nfs4_session *session)
5779 {
5780 struct rpc_xprt *xprt;
5781 struct rpc_cred *cred;
5782
5783 cred = nfs4_get_exchange_id_cred(session->clp);
5784 nfs4_proc_destroy_session(session, cred);
5785 if (cred)
5786 put_rpccred(cred);
5787
5788 rcu_read_lock();
5789 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5790 rcu_read_unlock();
5791 dprintk("%s Destroy backchannel for xprt %p\n",
5792 __func__, xprt);
5793 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5794 nfs4_destroy_slot_tables(session);
5795 kfree(session);
5796 }
5797
5798 /*
5799 * Initialize the values to be used by the client in CREATE_SESSION.
5800 * If nfs4_init_session has set the fore channel request and response sizes,
5801 * use them.
5802 *
5803 * Set the back channel max_resp_sz_cached to zero to force the client to
5804 * always set csa_cachethis to FALSE because the current implementation
5805 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5806 */
5807 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5808 {
5809 struct nfs4_session *session = args->client->cl_session;
5810 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5811 mxresp_sz = session->fc_attrs.max_resp_sz;
5812
5813 if (mxrqst_sz == 0)
5814 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5815 if (mxresp_sz == 0)
5816 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5817 /* Fore channel attributes */
5818 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5819 args->fc_attrs.max_resp_sz = mxresp_sz;
5820 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5821 args->fc_attrs.max_reqs = max_session_slots;
5822
5823 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5824 "max_ops=%u max_reqs=%u\n",
5825 __func__,
5826 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5827 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5828
5829 /* Back channel attributes */
5830 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5831 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5832 args->bc_attrs.max_resp_sz_cached = 0;
5833 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5834 args->bc_attrs.max_reqs = 1;
5835
5836 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5837 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5838 __func__,
5839 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5840 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5841 args->bc_attrs.max_reqs);
5842 }
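/*
 * Example of the resulting CREATE_SESSION request attributes: with a
 * mount whose sizes have been filled in by nfs4_init_session(), the
 * fore channel advertises max_rqst_sz = wsize + nfs41_maxwrite_overhead
 * and max_resp_sz = rsize + nfs41_maxread_overhead, with max_reqs taken
 * from the max_session_slots module parameter; if the sizes were never
 * set they default to NFS_MAX_FILE_IO_SIZE.  The back channel is fixed
 * at a single PAGE_SIZE slot with max_resp_sz_cached = 0, since the
 * backchannel DRC only caches CB_SEQUENCE.
 */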
5843
5844 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5845 {
5846 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5847 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5848
5849 if (rcvd->max_resp_sz > sent->max_resp_sz)
5850 return -EINVAL;
5851 /*
5852 * Our requested max_ops is the minimum we need; we're not
5853 * prepared to break up compounds into smaller pieces than that.
5854 * So, no point even trying to continue if the server won't
5855 * cooperate:
5856 */
5857 if (rcvd->max_ops < sent->max_ops)
5858 return -EINVAL;
5859 if (rcvd->max_reqs == 0)
5860 return -EINVAL;
5861 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5862 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5863 return 0;
5864 }
5865
5866 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5867 {
5868 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5869 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5870
5871 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5872 return -EINVAL;
5873 if (rcvd->max_resp_sz < sent->max_resp_sz)
5874 return -EINVAL;
5875 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5876 return -EINVAL;
5877 /* These would render the backchannel useless: */
5878 if (rcvd->max_ops != sent->max_ops)
5879 return -EINVAL;
5880 if (rcvd->max_reqs != sent->max_reqs)
5881 return -EINVAL;
5882 return 0;
5883 }
5884
5885 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5886 struct nfs4_session *session)
5887 {
5888 int ret;
5889
5890 ret = nfs4_verify_fore_channel_attrs(args, session);
5891 if (ret)
5892 return ret;
5893 return nfs4_verify_back_channel_attrs(args, session);
5894 }
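/*
 * Negotiation rules enforced above: on the fore channel the server may
 * return a smaller max_resp_sz but not a larger one, must grant at
 * least the requested max_ops, and must offer a nonzero slot count
 * (anything above NFS4_MAX_SLOT_TABLE is clamped).  On the back
 * channel the server may not grow max_rqst_sz or max_resp_sz_cached,
 * may not shrink max_resp_sz, and must echo max_ops and max_reqs
 * unchanged.
 */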
5895
5896 static int _nfs4_proc_create_session(struct nfs_client *clp,
5897 struct rpc_cred *cred)
5898 {
5899 struct nfs4_session *session = clp->cl_session;
5900 struct nfs41_create_session_args args = {
5901 .client = clp,
5902 .cb_program = NFS4_CALLBACK,
5903 };
5904 struct nfs41_create_session_res res = {
5905 .client = clp,
5906 };
5907 struct rpc_message msg = {
5908 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5909 .rpc_argp = &args,
5910 .rpc_resp = &res,
5911 .rpc_cred = cred,
5912 };
5913 int status;
5914
5915 nfs4_init_channel_attrs(&args);
5916 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5917
5918 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5919
5920 if (!status)
5921 /* Verify the session's negotiated channel_attrs values */
5922 status = nfs4_verify_channel_attrs(&args, session);
5923 if (!status) {
5924 /* Increment the clientid slot sequence id */
5925 clp->cl_seqid++;
5926 }
5927
5928 return status;
5929 }
5930
5931 /*
5932 * Issues a CREATE_SESSION operation to the server.
5933 * It is the responsibility of the caller to verify the session is
5934 * expired before calling this routine.
5935 */
5936 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5937 {
5938 int status;
5939 unsigned *ptr;
5940 struct nfs4_session *session = clp->cl_session;
5941
5942 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5943
5944 status = _nfs4_proc_create_session(clp, cred);
5945 if (status)
5946 goto out;
5947
5948 /* Init or reset the session slot tables */
5949 status = nfs4_setup_session_slot_tables(session);
5950 dprintk("slot table setup returned %d\n", status);
5951 if (status)
5952 goto out;
5953
5954 ptr = (unsigned *)&session->sess_id.data[0];
5955 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
5956 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5957 out:
5958 dprintk("<-- %s\n", __func__);
5959 return status;
5960 }
5961
5962 /*
5963 * Issue the over-the-wire RPC DESTROY_SESSION.
5964 * The caller must serialize access to this routine.
5965 */
5966 int nfs4_proc_destroy_session(struct nfs4_session *session,
5967 struct rpc_cred *cred)
5968 {
5969 struct rpc_message msg = {
5970 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5971 .rpc_argp = session,
5972 .rpc_cred = cred,
5973 };
5974 int status = 0;
5975
5976 dprintk("--> nfs4_proc_destroy_session\n");
5977
5978 /* session is still being setup */
5979 if (session->clp->cl_cons_state != NFS_CS_READY)
5980 return status;
5981
5982 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5983
5984 if (status)
5985 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
5986 "Session has been destroyed regardless...\n", status);
5987
5988 dprintk("<-- nfs4_proc_destroy_session\n");
5989 return status;
5990 }
5991
5992 /*
5993 * With sessions, the client is not marked ready until after a
5994 * successful EXCHANGE_ID and CREATE_SESSION.
5995 *
5996 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
5997 * other versions of NFS can be tried.
5998 */
5999 static int nfs41_check_session_ready(struct nfs_client *clp)
6000 {
6001 int ret;
6002
6003 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
6004 ret = nfs4_client_recover_expired_lease(clp);
6005 if (ret)
6006 return ret;
6007 }
6008 if (clp->cl_cons_state < NFS_CS_READY)
6009 return -EPROTONOSUPPORT;
6010 smp_rmb();
6011 return 0;
6012 }
6013
6014 int nfs4_init_session(struct nfs_server *server)
6015 {
6016 struct nfs_client *clp = server->nfs_client;
6017 struct nfs4_session *session;
6018 unsigned int rsize, wsize;
6019
6020 if (!nfs4_has_session(clp))
6021 return 0;
6022
6023 session = clp->cl_session;
6024 spin_lock(&clp->cl_lock);
6025 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
6026
6027 rsize = server->rsize;
6028 if (rsize == 0)
6029 rsize = NFS_MAX_FILE_IO_SIZE;
6030 wsize = server->wsize;
6031 if (wsize == 0)
6032 wsize = NFS_MAX_FILE_IO_SIZE;
6033
6034 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
6035 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
6036 }
6037 spin_unlock(&clp->cl_lock);
6038
6039 return nfs41_check_session_ready(clp);
6040 }
6041
6042 int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
6043 {
6044 struct nfs4_session *session = clp->cl_session;
6045 int ret;
6046
6047 spin_lock(&clp->cl_lock);
6048 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
6049 /*
6050 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
6051 * DS lease equal to the MDS lease.
6052 */
6053 clp->cl_lease_time = lease_time;
6054 clp->cl_last_renewal = jiffies;
6055 }
6056 spin_unlock(&clp->cl_lock);
6057
6058 ret = nfs41_check_session_ready(clp);
6059 if (ret)
6060 return ret;
6061 /* Test for the DS role */
6062 if (!is_ds_client(clp))
6063 return -ENODEV;
6064 return 0;
6065 }
6066 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
6067
6068
6069 /*
6070 * Renew the cl_session lease.
6071 */
6072 struct nfs4_sequence_data {
6073 struct nfs_client *clp;
6074 struct nfs4_sequence_args args;
6075 struct nfs4_sequence_res res;
6076 };
6077
6078 static void nfs41_sequence_release(void *data)
6079 {
6080 struct nfs4_sequence_data *calldata = data;
6081 struct nfs_client *clp = calldata->clp;
6082
6083 if (atomic_read(&clp->cl_count) > 1)
6084 nfs4_schedule_state_renewal(clp);
6085 nfs_put_client(clp);
6086 kfree(calldata);
6087 }
6088
6089 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6090 {
6091 switch(task->tk_status) {
6092 case -NFS4ERR_DELAY:
6093 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6094 return -EAGAIN;
6095 default:
6096 nfs4_schedule_lease_recovery(clp);
6097 }
6098 return 0;
6099 }
6100
6101 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
6102 {
6103 struct nfs4_sequence_data *calldata = data;
6104 struct nfs_client *clp = calldata->clp;
6105
6106 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
6107 return;
6108
6109 if (task->tk_status < 0) {
6110 dprintk("%s ERROR %d\n", __func__, task->tk_status);
6111 if (atomic_read(&clp->cl_count) == 1)
6112 goto out;
6113
6114 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
6115 rpc_restart_call_prepare(task);
6116 return;
6117 }
6118 }
6119 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
6120 out:
6121 dprintk("<-- %s\n", __func__);
6122 }
6123
6124 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
6125 {
6126 struct nfs4_sequence_data *calldata = data;
6127 struct nfs_client *clp = calldata->clp;
6128 struct nfs4_sequence_args *args;
6129 struct nfs4_sequence_res *res;
6130
6131 args = task->tk_msg.rpc_argp;
6132 res = task->tk_msg.rpc_resp;
6133
6134 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
6135 return;
6136 rpc_call_start(task);
6137 }
6138
6139 static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data)
6140 {
6141 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
6142 nfs41_sequence_prepare(task, data);
6143 }
6144
6145 static const struct rpc_call_ops nfs41_sequence_ops = {
6146 .rpc_call_done = nfs41_sequence_call_done,
6147 .rpc_call_prepare = nfs41_sequence_prepare,
6148 .rpc_release = nfs41_sequence_release,
6149 };
6150
6151 static const struct rpc_call_ops nfs41_sequence_privileged_ops = {
6152 .rpc_call_done = nfs41_sequence_call_done,
6153 .rpc_call_prepare = nfs41_sequence_prepare_privileged,
6154 .rpc_release = nfs41_sequence_release,
6155 };
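/*
 * Two call_ops tables are provided for the SEQUENCE-only compound: the
 * plain nfs41_sequence_ops is used by the asynchronous renewal path,
 * while nfs41_sequence_privileged_ops marks the task as
 * RPC_PRIORITY_PRIVILEGED so that the synchronous nfs4_proc_sequence()
 * call, issued from the state manager's lease-renewal path, can make
 * progress ahead of ordinary requests.
 */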
6156
6157 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred,
6158 const struct rpc_call_ops *seq_ops)
6159 {
6160 struct nfs4_sequence_data *calldata;
6161 struct rpc_message msg = {
6162 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
6163 .rpc_cred = cred,
6164 };
6165 struct rpc_task_setup task_setup_data = {
6166 .rpc_client = clp->cl_rpcclient,
6167 .rpc_message = &msg,
6168 .callback_ops = seq_ops,
6169 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
6170 };
6171
6172 if (!atomic_inc_not_zero(&clp->cl_count))
6173 return ERR_PTR(-EIO);
6174 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6175 if (calldata == NULL) {
6176 nfs_put_client(clp);
6177 return ERR_PTR(-ENOMEM);
6178 }
6179 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
6180 msg.rpc_argp = &calldata->args;
6181 msg.rpc_resp = &calldata->res;
6182 calldata->clp = clp;
6183 task_setup_data.callback_data = calldata;
6184
6185 return rpc_run_task(&task_setup_data);
6186 }
6187
6188 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
6189 {
6190 struct rpc_task *task;
6191 int ret = 0;
6192
6193 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
6194 return 0;
6195 task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
6196 if (IS_ERR(task))
6197 ret = PTR_ERR(task);
6198 else
6199 rpc_put_task_async(task);
6200 dprintk("<-- %s status=%d\n", __func__, ret);
6201 return ret;
6202 }
6203
6204 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
6205 {
6206 struct rpc_task *task;
6207 int ret;
6208
6209 task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops);
6210 if (IS_ERR(task)) {
6211 ret = PTR_ERR(task);
6212 goto out;
6213 }
6214 ret = rpc_wait_for_completion_task(task);
6215 if (!ret) {
6216 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
6217
6218 if (task->tk_status == 0)
6219 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
6220 ret = task->tk_status;
6221 }
6222 rpc_put_task(task);
6223 out:
6224 dprintk("<-- %s status=%d\n", __func__, ret);
6225 return ret;
6226 }
6227
6228 struct nfs4_reclaim_complete_data {
6229 struct nfs_client *clp;
6230 struct nfs41_reclaim_complete_args arg;
6231 struct nfs41_reclaim_complete_res res;
6232 };
6233
6234 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
6235 {
6236 struct nfs4_reclaim_complete_data *calldata = data;
6237
6238 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
6239 if (nfs41_setup_sequence(calldata->clp->cl_session,
6240 &calldata->arg.seq_args,
6241 &calldata->res.seq_res, task))
6242 return;
6243
6244 rpc_call_start(task);
6245 }
6246
6247 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6248 {
6249 switch(task->tk_status) {
6250 case 0:
6251 case -NFS4ERR_COMPLETE_ALREADY:
6252 case -NFS4ERR_WRONG_CRED: /* What to do here? */
6253 break;
6254 case -NFS4ERR_DELAY:
6255 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6256 /* fall through */
6257 case -NFS4ERR_RETRY_UNCACHED_REP:
6258 return -EAGAIN;
6259 default:
6260 nfs4_schedule_lease_recovery(clp);
6261 }
6262 return 0;
6263 }
6264
6265 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
6266 {
6267 struct nfs4_reclaim_complete_data *calldata = data;
6268 struct nfs_client *clp = calldata->clp;
6269 struct nfs4_sequence_res *res = &calldata->res.seq_res;
6270
6271 dprintk("--> %s\n", __func__);
6272 if (!nfs41_sequence_done(task, res))
6273 return;
6274
6275 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
6276 rpc_restart_call_prepare(task);
6277 return;
6278 }
6279 dprintk("<-- %s\n", __func__);
6280 }
6281
6282 static void nfs4_free_reclaim_complete_data(void *data)
6283 {
6284 struct nfs4_reclaim_complete_data *calldata = data;
6285
6286 kfree(calldata);
6287 }
6288
6289 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
6290 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
6291 .rpc_call_done = nfs4_reclaim_complete_done,
6292 .rpc_release = nfs4_free_reclaim_complete_data,
6293 };
6294
6295 /*
6296 * Issue a global reclaim complete.
6297 */
6298 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
6299 {
6300 struct nfs4_reclaim_complete_data *calldata;
6301 struct rpc_task *task;
6302 struct rpc_message msg = {
6303 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
6304 };
6305 struct rpc_task_setup task_setup_data = {
6306 .rpc_client = clp->cl_rpcclient,
6307 .rpc_message = &msg,
6308 .callback_ops = &nfs4_reclaim_complete_call_ops,
6309 .flags = RPC_TASK_ASYNC,
6310 };
6311 int status = -ENOMEM;
6312
6313 dprintk("--> %s\n", __func__);
6314 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6315 if (calldata == NULL)
6316 goto out;
6317 calldata->clp = clp;
6318 calldata->arg.one_fs = 0;
6319
6320 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6321 msg.rpc_argp = &calldata->arg;
6322 msg.rpc_resp = &calldata->res;
6323 task_setup_data.callback_data = calldata;
6324 task = rpc_run_task(&task_setup_data);
6325 if (IS_ERR(task)) {
6326 status = PTR_ERR(task);
6327 goto out;
6328 }
6329 status = nfs4_wait_for_completion_rpc_task(task);
6330 if (status == 0)
6331 status = task->tk_status;
6332 rpc_put_task(task);
6333 return 0;
6334 out:
6335 dprintk("<-- %s status=%d\n", __func__, status);
6336 return status;
6337 }
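/*
 * nfs41_proc_reclaim_complete() above sends a global RECLAIM_COMPLETE
 * (one_fs == 0) as an asynchronous task and waits for it to finish;
 * NFS4ERR_DELAY and NFS4ERR_RETRY_UNCACHED_REP replies are retried
 * from the done callback, while other errors schedule lease recovery.
 * Once the task has been started successfully the function returns 0
 * regardless of the reply status.
 */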
6338
6339 static void
6340 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6341 {
6342 struct nfs4_layoutget *lgp = calldata;
6343 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6344
6345 dprintk("--> %s\n", __func__);
6346 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
6347 * right now covering the LAYOUTGET we are about to send.
6348 * However, that is not so catastrophic, and there seems
6349 * to be no way to prevent it completely.
6350 */
6351 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
6352 &lgp->res.seq_res, task))
6353 return;
6354 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6355 NFS_I(lgp->args.inode)->layout,
6356 lgp->args.ctx->state)) {
6357 rpc_exit(task, NFS4_OK);
6358 return;
6359 }
6360 rpc_call_start(task);
6361 }
6362
6363 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6364 {
6365 struct nfs4_layoutget *lgp = calldata;
6366 struct inode *inode = lgp->args.inode;
6367 struct nfs_server *server = NFS_SERVER(inode);
6368 struct pnfs_layout_hdr *lo;
6369 struct nfs4_state *state = NULL;
6370
6371 dprintk("--> %s\n", __func__);
6372
6373 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
6374 goto out;
6375
6376 switch (task->tk_status) {
6377 case 0:
6378 goto out;
6379 case -NFS4ERR_LAYOUTTRYLATER:
6380 case -NFS4ERR_RECALLCONFLICT:
6381 task->tk_status = -NFS4ERR_DELAY;
6382 break;
6383 case -NFS4ERR_EXPIRED:
6384 case -NFS4ERR_BAD_STATEID:
6385 spin_lock(&inode->i_lock);
6386 lo = NFS_I(inode)->layout;
6387 if (!lo || list_empty(&lo->plh_segs)) {
6388 spin_unlock(&inode->i_lock);
6389 /* If the open stateid was bad, then recover it. */
6390 state = lgp->args.ctx->state;
6391 } else {
6392 LIST_HEAD(head);
6393
6394 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
6395 spin_unlock(&inode->i_lock);
6396 /* Mark the bad layout state as invalid, then
6397 * retry using the open stateid. */
6398 pnfs_free_lseg_list(&head);
6399 }
6400 }
6401 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
6402 rpc_restart_call_prepare(task);
6403 out:
6404 dprintk("<-- %s\n", __func__);
6405 }
6406
6407 static size_t max_response_pages(struct nfs_server *server)
6408 {
6409 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6410 return nfs_page_array_len(0, max_resp_sz);
6411 }
6412
6413 static void nfs4_free_pages(struct page **pages, size_t size)
6414 {
6415 int i;
6416
6417 if (!pages)
6418 return;
6419
6420 for (i = 0; i < size; i++) {
6421 if (!pages[i])
6422 break;
6423 __free_page(pages[i]);
6424 }
6425 kfree(pages);
6426 }
6427
6428 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6429 {
6430 struct page **pages;
6431 int i;
6432
6433 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6434 if (!pages) {
6435 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6436 return NULL;
6437 }
6438
6439 for (i = 0; i < size; i++) {
6440 pages[i] = alloc_page(gfp_flags);
6441 if (!pages[i]) {
6442 dprintk("%s: failed to allocate page\n", __func__);
6443 nfs4_free_pages(pages, size);
6444 return NULL;
6445 }
6446 }
6447
6448 return pages;
6449 }
6450
6451 static void nfs4_layoutget_release(void *calldata)
6452 {
6453 struct nfs4_layoutget *lgp = calldata;
6454 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6455 size_t max_pages = max_response_pages(server);
6456
6457 dprintk("--> %s\n", __func__);
6458 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6459 put_nfs_open_context(lgp->args.ctx);
6460 kfree(calldata);
6461 dprintk("<-- %s\n", __func__);
6462 }
6463
6464 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6465 .rpc_call_prepare = nfs4_layoutget_prepare,
6466 .rpc_call_done = nfs4_layoutget_done,
6467 .rpc_release = nfs4_layoutget_release,
6468 };
6469
6470 struct pnfs_layout_segment *
6471 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6472 {
6473 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6474 size_t max_pages = max_response_pages(server);
6475 struct rpc_task *task;
6476 struct rpc_message msg = {
6477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6478 .rpc_argp = &lgp->args,
6479 .rpc_resp = &lgp->res,
6480 };
6481 struct rpc_task_setup task_setup_data = {
6482 .rpc_client = server->client,
6483 .rpc_message = &msg,
6484 .callback_ops = &nfs4_layoutget_call_ops,
6485 .callback_data = lgp,
6486 .flags = RPC_TASK_ASYNC,
6487 };
6488 struct pnfs_layout_segment *lseg = NULL;
6489 int status = 0;
6490
6491 dprintk("--> %s\n", __func__);
6492
6493 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6494 if (!lgp->args.layout.pages) {
6495 nfs4_layoutget_release(lgp);
6496 return ERR_PTR(-ENOMEM);
6497 }
6498 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6499
6500 lgp->res.layoutp = &lgp->args.layout;
6501 lgp->res.seq_res.sr_slot = NULL;
6502 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6503 task = rpc_run_task(&task_setup_data);
6504 if (IS_ERR(task))
6505 return ERR_CAST(task);
6506 status = nfs4_wait_for_completion_rpc_task(task);
6507 if (status == 0)
6508 status = task->tk_status;
6509 if (status == 0)
6510 lseg = pnfs_layout_process(lgp);
6511 rpc_put_task(task);
6512 dprintk("<-- %s status=%d\n", __func__, status);
6513 if (status)
6514 return ERR_PTR(status);
6515 return lseg;
6516 }
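/*
 * The LAYOUTGET flow above: allocate max_response_pages() pages to
 * hold the opaque layout body returned by the server, run the compound
 * as an asynchronous task, wait for completion, and on success hand
 * the decoded reply to pnfs_layout_process() to instantiate the layout
 * segment.  The release callback frees the reply pages and drops the
 * open context reference whether or not the call succeeded.
 */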
6517
6518 static void
6519 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6520 {
6521 struct nfs4_layoutreturn *lrp = calldata;
6522
6523 dprintk("--> %s\n", __func__);
6524 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6525 &lrp->res.seq_res, task))
6526 return;
6527 rpc_call_start(task);
6528 }
6529
6530 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6531 {
6532 struct nfs4_layoutreturn *lrp = calldata;
6533 struct nfs_server *server;
6534
6535 dprintk("--> %s\n", __func__);
6536
6537 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6538 return;
6539
6540 server = NFS_SERVER(lrp->args.inode);
6541 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6542 rpc_restart_call_prepare(task);
6543 return;
6544 }
6545 dprintk("<-- %s\n", __func__);
6546 }
6547
6548 static void nfs4_layoutreturn_release(void *calldata)
6549 {
6550 struct nfs4_layoutreturn *lrp = calldata;
6551 struct pnfs_layout_hdr *lo = lrp->args.layout;
6552
6553 dprintk("--> %s\n", __func__);
6554 spin_lock(&lo->plh_inode->i_lock);
6555 if (lrp->res.lrs_present)
6556 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6557 lo->plh_block_lgets--;
6558 spin_unlock(&lo->plh_inode->i_lock);
6559 pnfs_put_layout_hdr(lrp->args.layout);
6560 kfree(calldata);
6561 dprintk("<-- %s\n", __func__);
6562 }
6563
6564 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6565 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6566 .rpc_call_done = nfs4_layoutreturn_done,
6567 .rpc_release = nfs4_layoutreturn_release,
6568 };
6569
6570 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6571 {
6572 struct rpc_task *task;
6573 struct rpc_message msg = {
6574 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6575 .rpc_argp = &lrp->args,
6576 .rpc_resp = &lrp->res,
6577 };
6578 struct rpc_task_setup task_setup_data = {
6579 .rpc_client = lrp->clp->cl_rpcclient,
6580 .rpc_message = &msg,
6581 .callback_ops = &nfs4_layoutreturn_call_ops,
6582 .callback_data = lrp,
6583 };
6584 int status;
6585
6586 dprintk("--> %s\n", __func__);
6587 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6588 task = rpc_run_task(&task_setup_data);
6589 if (IS_ERR(task))
6590 return PTR_ERR(task);
6591 status = task->tk_status;
6592 dprintk("<-- %s status=%d\n", __func__, status);
6593 rpc_put_task(task);
6594 return status;
6595 }
6596
6597 /*
6598 * Retrieve the list of Data Server devices from the MDS.
6599 */
6600 static int _nfs4_getdevicelist(struct nfs_server *server,
6601 const struct nfs_fh *fh,
6602 struct pnfs_devicelist *devlist)
6603 {
6604 struct nfs4_getdevicelist_args args = {
6605 .fh = fh,
6606 .layoutclass = server->pnfs_curr_ld->id,
6607 };
6608 struct nfs4_getdevicelist_res res = {
6609 .devlist = devlist,
6610 };
6611 struct rpc_message msg = {
6612 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6613 .rpc_argp = &args,
6614 .rpc_resp = &res,
6615 };
6616 int status;
6617
6618 dprintk("--> %s\n", __func__);
6619 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6620 &res.seq_res, 0);
6621 dprintk("<-- %s status=%d\n", __func__, status);
6622 return status;
6623 }
6624
6625 int nfs4_proc_getdevicelist(struct nfs_server *server,
6626 const struct nfs_fh *fh,
6627 struct pnfs_devicelist *devlist)
6628 {
6629 struct nfs4_exception exception = { };
6630 int err;
6631
6632 do {
6633 err = nfs4_handle_exception(server,
6634 _nfs4_getdevicelist(server, fh, devlist),
6635 &exception);
6636 } while (exception.retry);
6637
6638 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6639 err, devlist->num_devs);
6640
6641 return err;
6642 }
6643 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
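/*
 * nfs4_proc_getdevicelist() follows the usual pairing in this file:
 * the underscore-prefixed helper issues a single synchronous compound,
 * while the exported wrapper loops it through nfs4_handle_exception(),
 * which sleeps and sets exception.retry for transient errors such as
 * NFS4ERR_DELAY until the call either succeeds or fails permanently.
 */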
6644
6645 static int
6646 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6647 {
6648 struct nfs4_getdeviceinfo_args args = {
6649 .pdev = pdev,
6650 };
6651 struct nfs4_getdeviceinfo_res res = {
6652 .pdev = pdev,
6653 };
6654 struct rpc_message msg = {
6655 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6656 .rpc_argp = &args,
6657 .rpc_resp = &res,
6658 };
6659 int status;
6660
6661 dprintk("--> %s\n", __func__);
6662 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6663 dprintk("<-- %s status=%d\n", __func__, status);
6664
6665 return status;
6666 }
6667
6668 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6669 {
6670 struct nfs4_exception exception = { };
6671 int err;
6672
6673 do {
6674 err = nfs4_handle_exception(server,
6675 _nfs4_proc_getdeviceinfo(server, pdev),
6676 &exception);
6677 } while (exception.retry);
6678 return err;
6679 }
6680 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6681
6682 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6683 {
6684 struct nfs4_layoutcommit_data *data = calldata;
6685 struct nfs_server *server = NFS_SERVER(data->args.inode);
6686
6687 if (nfs4_setup_sequence(server, &data->args.seq_args,
6688 &data->res.seq_res, task))
6689 return;
6690 rpc_call_start(task);
6691 }
6692
6693 static void
6694 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6695 {
6696 struct nfs4_layoutcommit_data *data = calldata;
6697 struct nfs_server *server = NFS_SERVER(data->args.inode);
6698
6699 if (!nfs4_sequence_done(task, &data->res.seq_res))
6700 return;
6701
6702 switch (task->tk_status) { /* Just ignore these failures */
6703 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6704 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6705 case -NFS4ERR_BADLAYOUT: /* no layout */
6706 case -NFS4ERR_GRACE: /* loca_reclaim is always false */
6707 task->tk_status = 0;
6708 break;
6709 case 0:
6710 nfs_post_op_update_inode_force_wcc(data->args.inode,
6711 data->res.fattr);
6712 break;
6713 default:
6714 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6715 rpc_restart_call_prepare(task);
6716 return;
6717 }
6718 }
6719 }
6720
6721 static void nfs4_layoutcommit_release(void *calldata)
6722 {
6723 struct nfs4_layoutcommit_data *data = calldata;
6724 struct pnfs_layout_segment *lseg, *tmp;
6725 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6726
6727 pnfs_cleanup_layoutcommit(data);
6728 /* Matched by references in pnfs_set_layoutcommit */
6729 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6730 list_del_init(&lseg->pls_lc_list);
6731 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6732 &lseg->pls_flags))
6733 pnfs_put_lseg(lseg);
6734 }
6735
6736 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6737 smp_mb__after_clear_bit();
6738 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6739
6740 put_rpccred(data->cred);
6741 kfree(data);
6742 }
6743
6744 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6745 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6746 .rpc_call_done = nfs4_layoutcommit_done,
6747 .rpc_release = nfs4_layoutcommit_release,
6748 };
6749
6750 int
6751 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6752 {
6753 struct rpc_message msg = {
6754 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6755 .rpc_argp = &data->args,
6756 .rpc_resp = &data->res,
6757 .rpc_cred = data->cred,
6758 };
6759 struct rpc_task_setup task_setup_data = {
6760 .task = &data->task,
6761 .rpc_client = NFS_CLIENT(data->args.inode),
6762 .rpc_message = &msg,
6763 .callback_ops = &nfs4_layoutcommit_ops,
6764 .callback_data = data,
6765 .flags = RPC_TASK_ASYNC,
6766 };
6767 struct rpc_task *task;
6768 int status = 0;
6769
6770 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6771 "lbw: %llu inode %lu\n",
6772 data->task.tk_pid, sync,
6773 data->args.lastbytewritten,
6774 data->args.inode->i_ino);
6775
6776 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6777 task = rpc_run_task(&task_setup_data);
6778 if (IS_ERR(task))
6779 return PTR_ERR(task);
6780 if (sync == false)
6781 goto out;
6782 status = nfs4_wait_for_completion_rpc_task(task);
6783 if (status != 0)
6784 goto out;
6785 status = task->tk_status;
6786 out:
6787 dprintk("%s: status %d\n", __func__, status);
6788 rpc_put_task(task);
6789 return status;
6790 }
6791
6792 static int
6793 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6794 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6795 {
6796 struct nfs41_secinfo_no_name_args args = {
6797 .style = SECINFO_STYLE_CURRENT_FH,
6798 };
6799 struct nfs4_secinfo_res res = {
6800 .flavors = flavors,
6801 };
6802 struct rpc_message msg = {
6803 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6804 .rpc_argp = &args,
6805 .rpc_resp = &res,
6806 };
6807 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6808 }
6809
6810 static int
6811 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6812 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6813 {
6814 struct nfs4_exception exception = { };
6815 int err;
6816 do {
6817 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6818 switch (err) {
6819 case 0:
6820 case -NFS4ERR_WRONGSEC:
6821 case -NFS4ERR_NOTSUPP:
6822 goto out;
6823 default:
6824 err = nfs4_handle_exception(server, err, &exception);
6825 }
6826 } while (exception.retry);
6827 out:
6828 return err;
6829 }
6830
6831 static int
6832 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6833 struct nfs_fsinfo *info)
6834 {
6835 int err;
6836 struct page *page;
6837 rpc_authflavor_t flavor;
6838 struct nfs4_secinfo_flavors *flavors;
6839
6840 page = alloc_page(GFP_KERNEL);
6841 if (!page) {
6842 err = -ENOMEM;
6843 goto out;
6844 }
6845
6846 flavors = page_address(page);
6847 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6848
6849 /*
6850 * Fall back on "guess and check" method if
6851 * the server doesn't support SECINFO_NO_NAME
6852 */
6853 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6854 err = nfs4_find_root_sec(server, fhandle, info);
6855 goto out_freepage;
6856 }
6857 if (err)
6858 goto out_freepage;
6859
6860 flavor = nfs_find_best_sec(flavors);
6861 if (err == 0)
6862 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6863
6864 out_freepage:
6865 put_page(page);
6866 if (err == -EACCES)
6867 return -EPERM;
6868 out:
6869 return err;
6870 }
6871
6872 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6873 {
6874 int status;
6875 struct nfs41_test_stateid_args args = {
6876 .stateid = stateid,
6877 };
6878 struct nfs41_test_stateid_res res;
6879 struct rpc_message msg = {
6880 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6881 .rpc_argp = &args,
6882 .rpc_resp = &res,
6883 };
6884
6885 dprintk("NFS call test_stateid %p\n", stateid);
6886 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6887 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6888 if (status != NFS_OK) {
6889 dprintk("NFS reply test_stateid: failed, %d\n", status);
6890 return status;
6891 }
6892 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
6893 return -res.status;
6894 }
6895
6896 /**
6897 * nfs41_test_stateid - perform a TEST_STATEID operation
6898 *
6899 * @server: server / transport on which to perform the operation
6900 * @stateid: state ID to test
6901 *
6902 * Returns NFS_OK if the server recognizes that "stateid" is valid.
6903 * Otherwise a negative NFS4ERR value is returned, indicating that the
6904 * operation failed or that the state ID is not currently valid.
6905 */
6906 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6907 {
6908 struct nfs4_exception exception = { };
6909 int err;
6910 do {
6911 err = _nfs41_test_stateid(server, stateid);
6912 if (err != -NFS4ERR_DELAY)
6913 break;
6914 nfs4_handle_exception(server, err, &exception);
6915 } while (exception.retry);
6916 return err;
6917 }
6918
6919 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6920 {
6921 struct nfs41_free_stateid_args args = {
6922 .stateid = stateid,
6923 };
6924 struct nfs41_free_stateid_res res;
6925 struct rpc_message msg = {
6926 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6927 .rpc_argp = &args,
6928 .rpc_resp = &res,
6929 };
6930 int status;
6931
6932 dprintk("NFS call free_stateid %p\n", stateid);
6933 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6934 status = nfs4_call_sync_sequence(server->client, server, &msg,
6935 &args.seq_args, &res.seq_res, 1);
6936 dprintk("NFS reply free_stateid: %d\n", status);
6937 return status;
6938 }
6939
6940 /**
6941 * nfs41_free_stateid - perform a FREE_STATEID operation
6942 *
6943 * @server: server / transport on which to perform the operation
6944 * @stateid: state ID to release
6945 *
6946 * Returns NFS_OK if the server freed "stateid". Otherwise a
6947 * negative NFS4ERR value is returned.
6948 */
6949 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6950 {
6951 struct nfs4_exception exception = { };
6952 int err;
6953 do {
6954 err = _nfs4_free_stateid(server, stateid);
6955 if (err != -NFS4ERR_DELAY)
6956 break;
6957 nfs4_handle_exception(server, err, &exception);
6958 } while (exception.retry);
6959 return err;
6960 }
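/*
 * TEST_STATEID and FREE_STATEID are typically used together when
 * validating recovered state.  A hypothetical sketch of that pattern
 * (illustrative only, not compiled):
 */
#if 0
	if (nfs41_test_stateid(server, stateid) != NFS_OK)
		/* The server no longer recognizes the state ID, so ask it
		 * to forget it entirely. */
		nfs41_free_stateid(server, stateid);
#endif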
6961
6962 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6963 const nfs4_stateid *s2)
6964 {
6965 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6966 return false;
6967
6968 if (s1->seqid == s2->seqid)
6969 return true;
6970 if (s1->seqid == 0 || s2->seqid == 0)
6971 return true;
6972
6973 return false;
6974 }
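/*
 * For NFSv4.1 a seqid of zero acts as a wildcard in the comparison
 * above: as long as the "other" field matches, either side carrying a
 * zero sequence number is treated as matching any sequence number, in
 * line with the special zero seqid defined by the protocol.
 */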
6975
6976 #endif /* CONFIG_NFS_V4_1 */
6977
6978 static bool nfs4_match_stateid(const nfs4_stateid *s1,
6979 const nfs4_stateid *s2)
6980 {
6981 return nfs4_stateid_match(s1, s2);
6982 }
6983
6984
6985 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6986 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6987 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6988 .recover_open = nfs4_open_reclaim,
6989 .recover_lock = nfs4_lock_reclaim,
6990 .establish_clid = nfs4_init_clientid,
6991 .get_clid_cred = nfs4_get_setclientid_cred,
6992 .detect_trunking = nfs40_discover_server_trunking,
6993 };
6994
6995 #if defined(CONFIG_NFS_V4_1)
6996 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6997 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6998 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6999 .recover_open = nfs4_open_reclaim,
7000 .recover_lock = nfs4_lock_reclaim,
7001 .establish_clid = nfs41_init_clientid,
7002 .get_clid_cred = nfs4_get_exchange_id_cred,
7003 .reclaim_complete = nfs41_proc_reclaim_complete,
7004 .detect_trunking = nfs41_discover_server_trunking,
7005 };
7006 #endif /* CONFIG_NFS_V4_1 */
7007
7008 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
7009 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
7010 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
7011 .recover_open = nfs4_open_expired,
7012 .recover_lock = nfs4_lock_expired,
7013 .establish_clid = nfs4_init_clientid,
7014 .get_clid_cred = nfs4_get_setclientid_cred,
7015 };
7016
7017 #if defined(CONFIG_NFS_V4_1)
7018 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
7019 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
7020 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
7021 .recover_open = nfs41_open_expired,
7022 .recover_lock = nfs41_lock_expired,
7023 .establish_clid = nfs41_init_clientid,
7024 .get_clid_cred = nfs4_get_exchange_id_cred,
7025 };
7026 #endif /* CONFIG_NFS_V4_1 */
7027
7028 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
7029 .sched_state_renewal = nfs4_proc_async_renew,
7030 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
7031 .renew_lease = nfs4_proc_renew,
7032 };
7033
7034 #if defined(CONFIG_NFS_V4_1)
7035 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
7036 .sched_state_renewal = nfs41_proc_async_sequence,
7037 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
7038 .renew_lease = nfs4_proc_sequence,
7039 };
7040 #endif
7041
7042 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
7043 .minor_version = 0,
7044 .call_sync = _nfs4_call_sync,
7045 .match_stateid = nfs4_match_stateid,
7046 .find_root_sec = nfs4_find_root_sec,
7047 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
7048 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
7049 .state_renewal_ops = &nfs40_state_renewal_ops,
7050 };
7051
7052 #if defined(CONFIG_NFS_V4_1)
7053 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
7054 .minor_version = 1,
7055 .call_sync = _nfs4_call_sync_session,
7056 .match_stateid = nfs41_match_stateid,
7057 .find_root_sec = nfs41_find_root_sec,
7058 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
7059 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
7060 .state_renewal_ops = &nfs41_state_renewal_ops,
7061 };
7062 #endif
7063
7064 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
7065 [0] = &nfs_v4_0_minor_ops,
7066 #if defined(CONFIG_NFS_V4_1)
7067 [1] = &nfs_v4_1_minor_ops,
7068 #endif
7069 };
7070
7071 const struct inode_operations nfs4_dir_inode_operations = {
7072 .create = nfs_create,
7073 .lookup = nfs_lookup,
7074 .atomic_open = nfs_atomic_open,
7075 .link = nfs_link,
7076 .unlink = nfs_unlink,
7077 .symlink = nfs_symlink,
7078 .mkdir = nfs_mkdir,
7079 .rmdir = nfs_rmdir,
7080 .mknod = nfs_mknod,
7081 .rename = nfs_rename,
7082 .permission = nfs_permission,
7083 .getattr = nfs_getattr,
7084 .setattr = nfs_setattr,
7085 .getxattr = generic_getxattr,
7086 .setxattr = generic_setxattr,
7087 .listxattr = generic_listxattr,
7088 .removexattr = generic_removexattr,
7089 };
7090
7091 static const struct inode_operations nfs4_file_inode_operations = {
7092 .permission = nfs_permission,
7093 .getattr = nfs_getattr,
7094 .setattr = nfs_setattr,
7095 .getxattr = generic_getxattr,
7096 .setxattr = generic_setxattr,
7097 .listxattr = generic_listxattr,
7098 .removexattr = generic_removexattr,
7099 };
7100
7101 const struct nfs_rpc_ops nfs_v4_clientops = {
7102 .version = 4, /* protocol version */
7103 .dentry_ops = &nfs4_dentry_operations,
7104 .dir_inode_ops = &nfs4_dir_inode_operations,
7105 .file_inode_ops = &nfs4_file_inode_operations,
7106 .file_ops = &nfs4_file_operations,
7107 .getroot = nfs4_proc_get_root,
7108 .submount = nfs4_submount,
7109 .try_mount = nfs4_try_mount,
7110 .getattr = nfs4_proc_getattr,
7111 .setattr = nfs4_proc_setattr,
7112 .lookup = nfs4_proc_lookup,
7113 .access = nfs4_proc_access,
7114 .readlink = nfs4_proc_readlink,
7115 .create = nfs4_proc_create,
7116 .remove = nfs4_proc_remove,
7117 .unlink_setup = nfs4_proc_unlink_setup,
7118 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
7119 .unlink_done = nfs4_proc_unlink_done,
7120 .rename = nfs4_proc_rename,
7121 .rename_setup = nfs4_proc_rename_setup,
7122 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
7123 .rename_done = nfs4_proc_rename_done,
7124 .link = nfs4_proc_link,
7125 .symlink = nfs4_proc_symlink,
7126 .mkdir = nfs4_proc_mkdir,
7127 .rmdir = nfs4_proc_remove,
7128 .readdir = nfs4_proc_readdir,
7129 .mknod = nfs4_proc_mknod,
7130 .statfs = nfs4_proc_statfs,
7131 .fsinfo = nfs4_proc_fsinfo,
7132 .pathconf = nfs4_proc_pathconf,
7133 .set_capabilities = nfs4_server_capabilities,
7134 .decode_dirent = nfs4_decode_dirent,
7135 .read_setup = nfs4_proc_read_setup,
7136 .read_pageio_init = pnfs_pageio_init_read,
7137 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
7138 .read_done = nfs4_read_done,
7139 .write_setup = nfs4_proc_write_setup,
7140 .write_pageio_init = pnfs_pageio_init_write,
7141 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
7142 .write_done = nfs4_write_done,
7143 .commit_setup = nfs4_proc_commit_setup,
7144 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
7145 .commit_done = nfs4_commit_done,
7146 .lock = nfs4_proc_lock,
7147 .clear_acl_cache = nfs4_zap_acl_attr,
7148 .close_context = nfs4_close_context,
7149 .open_context = nfs4_atomic_open,
7150 .have_delegation = nfs4_have_delegation,
7151 .return_delegation = nfs4_inode_return_delegation,
7152 .alloc_client = nfs4_alloc_client,
7153 .init_client = nfs4_init_client,
7154 .free_client = nfs4_free_client,
7155 .create_server = nfs4_create_server,
7156 .clone_server = nfs_clone_server,
7157 };
7158
7159 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
7160 .prefix = XATTR_NAME_NFSV4_ACL,
7161 .list = nfs4_xattr_list_nfs4_acl,
7162 .get = nfs4_xattr_get_nfs4_acl,
7163 .set = nfs4_xattr_set_nfs4_acl,
7164 };
7165
7166 const struct xattr_handler *nfs4_xattr_handlers[] = {
7167 &nfs4_xattr_nfs4_acl_handler,
7168 NULL
7169 };
7170
7171 /*
7172 * Local variables:
7173 * c-basic-offset: 8
7174 * End:
7175 */