1 /*
2 * linux/fs/nfsd/nfs4state.c
3 *
4 * Copyright (c) 2001 The Regents of the University of Michigan.
5 * All rights reserved.
6 *
7 * Kendrick Smith <kmsmith@umich.edu>
8 * Andy Adamson <kandros@umich.edu>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 */
36
37 #include <linux/param.h>
38 #include <linux/major.h>
39 #include <linux/slab.h>
40
41 #include <linux/sunrpc/svc.h>
42 #include <linux/nfsd/nfsd.h>
43 #include <linux/nfsd/cache.h>
44 #include <linux/mount.h>
45 #include <linux/workqueue.h>
46 #include <linux/smp_lock.h>
47 #include <linux/kthread.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfsd/state.h>
50 #include <linux/nfsd/xdr4.h>
51 #include <linux/namei.h>
52 #include <linux/swap.h>
53 #include <linux/mutex.h>
54 #include <linux/lockd/bind.h>
55 #include <linux/module.h>
56
57 #define NFSDDBG_FACILITY NFSDDBG_PROC
58
59 /* Globals */
60 static time_t lease_time = 90; /* default lease time */
61 static time_t user_lease_time = 90;
62 static time_t boot_time;
63 static int in_grace = 1;
64 static u32 current_clientid = 1;
65 static u32 current_ownerid = 1;
66 static u32 current_fileid = 1;
67 static u32 current_delegid = 1;
68 static u32 nfs4_init;
69 static stateid_t zerostateid; /* bits all 0 */
70 static stateid_t onestateid; /* bits all 1 */
71
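/*
 * RFC 3530 defines two special stateids that never refer to server-held
 * open state: all bits zero (the "anonymous" stateid) and all bits one
 * (which lets a READ bypass existing share reservations).
 */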
72 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
73 #define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t)))
74
75 /* forward declarations */
76 static struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
77 static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
78 static void release_stateid_lockowners(struct nfs4_stateid *open_stp);
79 static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
80 static void nfs4_set_recdir(char *recdir);
81
82 /* Locking:
83 *
84 * client_mutex:
85  *	protects conf_id_hashtbl[], conf_str_hashtbl[],
86  *	unconf_str_hashtbl[], unconf_id_hashtbl[].
87 */
88 static DEFINE_MUTEX(client_mutex);
89
90 static struct kmem_cache *stateowner_slab = NULL;
91 static struct kmem_cache *file_slab = NULL;
92 static struct kmem_cache *stateid_slab = NULL;
93 static struct kmem_cache *deleg_slab = NULL;
94
95 void
96 nfs4_lock_state(void)
97 {
98 mutex_lock(&client_mutex);
99 }
100
101 void
102 nfs4_unlock_state(void)
103 {
104 mutex_unlock(&client_mutex);
105 }
106
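/*
 * Simple byte-wise hash (x = x * 37 + byte) over an opaque blob; used by
 * the owner-string and client-string hash macros below.
 */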
107 static inline u32
108 opaque_hashval(const void *ptr, int nbytes)
109 {
110 unsigned char *cptr = (unsigned char *) ptr;
111
112 u32 x = 0;
113 while (nbytes--) {
114 x *= 37;
115 x += *cptr++;
116 }
117 return x;
118 }
119
120 /* forward declarations */
121 static void release_stateowner(struct nfs4_stateowner *sop);
122 static void release_stateid(struct nfs4_stateid *stp, int flags);
123
124 /*
125 * Delegation state
126 */
127
128 /* recall_lock protects the del_recall_lru */
129 static DEFINE_SPINLOCK(recall_lock);
130 static struct list_head del_recall_lru;
131
132 static void
133 free_nfs4_file(struct kref *kref)
134 {
135 struct nfs4_file *fp = container_of(kref, struct nfs4_file, fi_ref);
136 list_del(&fp->fi_hash);
137 iput(fp->fi_inode);
138 kmem_cache_free(file_slab, fp);
139 }
140
141 static inline void
142 put_nfs4_file(struct nfs4_file *fi)
143 {
144 kref_put(&fi->fi_ref, free_nfs4_file);
145 }
146
147 static inline void
148 get_nfs4_file(struct nfs4_file *fi)
149 {
150 kref_get(&fi->fi_ref);
151 }
152
153 static int num_delegations;
154 unsigned int max_delegations;
155
156 /*
157 * Open owner state (share locks)
158 */
159
160 /* hash tables for nfs4_stateowner */
161 #define OWNER_HASH_BITS 8
162 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
163 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
164
165 #define ownerid_hashval(id) \
166 ((id) & OWNER_HASH_MASK)
167 #define ownerstr_hashval(clientid, ownername) \
168 (((clientid) + opaque_hashval((ownername.data), (ownername.len))) & OWNER_HASH_MASK)
169
170 static struct list_head ownerid_hashtbl[OWNER_HASH_SIZE];
171 static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
172
173 /* hash table for nfs4_file */
174 #define FILE_HASH_BITS 8
175 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
176 #define FILE_HASH_MASK (FILE_HASH_SIZE - 1)
177 /* hash table for (open)nfs4_stateid */
178 #define STATEID_HASH_BITS 10
179 #define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS)
180 #define STATEID_HASH_MASK (STATEID_HASH_SIZE - 1)
181
182 #define file_hashval(x) \
183 hash_ptr(x, FILE_HASH_BITS)
184 #define stateid_hashval(owner_id, file_id) \
185 (((owner_id) + (file_id)) & STATEID_HASH_MASK)
186
187 static struct list_head file_hashtbl[FILE_HASH_SIZE];
188 static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];
189
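/*
 * Allocate and initialise a delegation for the open stateid's file.
 * Returns NULL if the file has already had a delegation conflict or the
 * global delegation limit has been reached; otherwise the new delegation
 * is added to the file's and the client's delegation lists.
 */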
190 static struct nfs4_delegation *
191 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_fh *current_fh, u32 type)
192 {
193 struct nfs4_delegation *dp;
194 struct nfs4_file *fp = stp->st_file;
195 struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
196
197 dprintk("NFSD alloc_init_deleg\n");
198 if (fp->fi_had_conflict)
199 return NULL;
200 if (num_delegations > max_delegations)
201 return NULL;
202 dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
203 if (dp == NULL)
204 return dp;
205 num_delegations++;
206 INIT_LIST_HEAD(&dp->dl_perfile);
207 INIT_LIST_HEAD(&dp->dl_perclnt);
208 INIT_LIST_HEAD(&dp->dl_recall_lru);
209 dp->dl_client = clp;
210 get_nfs4_file(fp);
211 dp->dl_file = fp;
212 dp->dl_flock = NULL;
213 get_file(stp->st_vfs_file);
214 dp->dl_vfs_file = stp->st_vfs_file;
215 dp->dl_type = type;
216 dp->dl_recall.cbr_dp = NULL;
217 dp->dl_recall.cbr_ident = cb->cb_ident;
218 dp->dl_recall.cbr_trunc = 0;
219 dp->dl_stateid.si_boot = boot_time;
220 dp->dl_stateid.si_stateownerid = current_delegid++;
221 dp->dl_stateid.si_fileid = 0;
222 dp->dl_stateid.si_generation = 0;
223 dp->dl_fhlen = current_fh->fh_handle.fh_size;
224 memcpy(dp->dl_fhval, &current_fh->fh_handle.fh_base,
225 current_fh->fh_handle.fh_size);
226 dp->dl_time = 0;
227 atomic_set(&dp->dl_count, 1);
228 list_add(&dp->dl_perfile, &fp->fi_delegations);
229 list_add(&dp->dl_perclnt, &clp->cl_delegations);
230 return dp;
231 }
232
233 void
234 nfs4_put_delegation(struct nfs4_delegation *dp)
235 {
236 if (atomic_dec_and_test(&dp->dl_count)) {
237 dprintk("NFSD: freeing dp %p\n",dp);
238 put_nfs4_file(dp->dl_file);
239 kmem_cache_free(deleg_slab, dp);
240 num_delegations--;
241 }
242 }
243
244 /* Remove the associated file_lock first, then remove the delegation.
245  * lease_modify() is called to remove the FL_LEASE file_lock from
246  * the i_flock list, eventually calling nfsd's lock_manager
247  * fl_release_private callback.
248 */
249 static void
250 nfs4_close_delegation(struct nfs4_delegation *dp)
251 {
252 struct file *filp = dp->dl_vfs_file;
253
254 dprintk("NFSD: close_delegation dp %p\n",dp);
255 dp->dl_vfs_file = NULL;
256 /* The following nfsd_close may not actually close the file,
257 * but we want to remove the lease in any case. */
258 if (dp->dl_flock)
259 vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
260 nfsd_close(filp);
261 }
262
263 /* Called under the state lock. */
264 static void
265 unhash_delegation(struct nfs4_delegation *dp)
266 {
267 list_del_init(&dp->dl_perfile);
268 list_del_init(&dp->dl_perclnt);
269 spin_lock(&recall_lock);
270 list_del_init(&dp->dl_recall_lru);
271 spin_unlock(&recall_lock);
272 nfs4_close_delegation(dp);
273 nfs4_put_delegation(dp);
274 }
275
276 /*
277 * SETCLIENTID state
278 */
279
280 /* Hash tables for nfs4_clientid state */
281 #define CLIENT_HASH_BITS 4
282 #define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
283 #define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
284
285 #define clientid_hashval(id) \
286 ((id) & CLIENT_HASH_MASK)
287 #define clientstr_hashval(name) \
288 (opaque_hashval((name), 8) & CLIENT_HASH_MASK)
289 /*
290 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
291 * used in reboot/reset lease grace period processing
292 *
293 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
294 * setclientid_confirmed info.
295 *
296  * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
297 * setclientid info.
298 *
299 * client_lru holds client queue ordered by nfs4_client.cl_time
300 * for lease renewal.
301 *
302 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
303 * for last close replay.
304 */
305 static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
306 static int reclaim_str_hashtbl_size = 0;
307 static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
308 static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
309 static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
310 static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
311 static struct list_head client_lru;
312 static struct list_head close_lru;
313
314 static inline void
315 renew_client(struct nfs4_client *clp)
316 {
317 /*
318  	* Move client to the end of the LRU list.
319 */
320 dprintk("renewing client (clientid %08x/%08x)\n",
321 clp->cl_clientid.cl_boot,
322 clp->cl_clientid.cl_id);
323 list_move_tail(&clp->cl_lru, &client_lru);
324 clp->cl_time = get_seconds();
325 }
326
327 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
328 static int
329 STALE_CLIENTID(clientid_t *clid)
330 {
331 if (clid->cl_boot == boot_time)
332 return 0;
333 dprintk("NFSD stale clientid (%08x/%08x)\n",
334 clid->cl_boot, clid->cl_id);
335 return 1;
336 }
337
338 /*
339 * XXX Should we use a slab cache ?
340 * This type of memory management is somewhat inefficient, but we use it
341 * anyway since SETCLIENTID is not a common operation.
342 */
343 static inline struct nfs4_client *
344 alloc_client(struct xdr_netobj name)
345 {
346 struct nfs4_client *clp;
347
348 if ((clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL))!= NULL) {
349 if ((clp->cl_name.data = kmalloc(name.len, GFP_KERNEL)) != NULL) {
350 memcpy(clp->cl_name.data, name.data, name.len);
351 clp->cl_name.len = name.len;
352 }
353 else {
354 kfree(clp);
355 clp = NULL;
356 }
357 }
358 return clp;
359 }
360
361 static void
362 shutdown_callback_client(struct nfs4_client *clp)
363 {
364 struct rpc_clnt *clnt = clp->cl_callback.cb_client;
365
366 /* shutdown rpc client, ending any outstanding recall rpcs */
367 if (clnt) {
368 clp->cl_callback.cb_client = NULL;
369 rpc_shutdown_client(clnt);
370 }
371 }
372
373 static inline void
374 free_client(struct nfs4_client *clp)
375 {
376 shutdown_callback_client(clp);
377 if (clp->cl_cred.cr_group_info)
378 put_group_info(clp->cl_cred.cr_group_info);
379 kfree(clp->cl_name.data);
380 kfree(clp);
381 }
382
383 void
384 put_nfs4_client(struct nfs4_client *clp)
385 {
386 if (atomic_dec_and_test(&clp->cl_count))
387 free_client(clp);
388 }
389
390 static void
391 expire_client(struct nfs4_client *clp)
392 {
393 struct nfs4_stateowner *sop;
394 struct nfs4_delegation *dp;
395 struct list_head reaplist;
396
397 dprintk("NFSD: expire_client cl_count %d\n",
398 atomic_read(&clp->cl_count));
399
400 INIT_LIST_HEAD(&reaplist);
401 spin_lock(&recall_lock);
402 while (!list_empty(&clp->cl_delegations)) {
403 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
404 dprintk("NFSD: expire client. dp %p, fp %p\n", dp,
405 dp->dl_flock);
406 list_del_init(&dp->dl_perclnt);
407 list_move(&dp->dl_recall_lru, &reaplist);
408 }
409 spin_unlock(&recall_lock);
410 while (!list_empty(&reaplist)) {
411 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
412 list_del_init(&dp->dl_recall_lru);
413 unhash_delegation(dp);
414 }
415 list_del(&clp->cl_idhash);
416 list_del(&clp->cl_strhash);
417 list_del(&clp->cl_lru);
418 while (!list_empty(&clp->cl_openowners)) {
419 sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
420 release_stateowner(sop);
421 }
422 put_nfs4_client(clp);
423 }
424
425 static struct nfs4_client *
426 create_client(struct xdr_netobj name, char *recdir) {
427 struct nfs4_client *clp;
428
429 if (!(clp = alloc_client(name)))
430 goto out;
431 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
432 atomic_set(&clp->cl_count, 1);
433 atomic_set(&clp->cl_callback.cb_set, 0);
434 INIT_LIST_HEAD(&clp->cl_idhash);
435 INIT_LIST_HEAD(&clp->cl_strhash);
436 INIT_LIST_HEAD(&clp->cl_openowners);
437 INIT_LIST_HEAD(&clp->cl_delegations);
438 INIT_LIST_HEAD(&clp->cl_lru);
439 out:
440 return clp;
441 }
442
443 static void
444 copy_verf(struct nfs4_client *target, nfs4_verifier *source) {
445 memcpy(target->cl_verifier.data, source->data, sizeof(target->cl_verifier.data));
446 }
447
448 static void
449 copy_clid(struct nfs4_client *target, struct nfs4_client *source) {
450 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
451 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
452 }
453
454 static void
455 copy_cred(struct svc_cred *target, struct svc_cred *source) {
456
457 target->cr_uid = source->cr_uid;
458 target->cr_gid = source->cr_gid;
459 target->cr_group_info = source->cr_group_info;
460 get_group_info(target->cr_group_info);
461 }
462
463 static inline int
464 same_name(const char *n1, const char *n2)
465 {
466 return 0 == memcmp(n1, n2, HEXDIR_LEN);
467 }
468
469 static int
470 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
471 {
472 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
473 }
474
475 static int
476 same_clid(clientid_t *cl1, clientid_t *cl2)
477 {
478 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
479 }
480
481 /* XXX what about NGROUP */
482 static int
483 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
484 {
485 return cr1->cr_uid == cr2->cr_uid;
486 }
487
488 static void
489 gen_clid(struct nfs4_client *clp) {
490 clp->cl_clientid.cl_boot = boot_time;
491 clp->cl_clientid.cl_id = current_clientid++;
492 }
493
494 static void
495 gen_confirm(struct nfs4_client *clp) {
496 struct timespec tv;
497 u32 * p;
498
499 tv = CURRENT_TIME;
500 p = (u32 *)clp->cl_confirm.data;
501 *p++ = tv.tv_sec;
502 *p++ = tv.tv_nsec;
503 }
504
505 static int
506 check_name(struct xdr_netobj name) {
507
508 if (name.len == 0)
509 return 0;
510 if (name.len > NFS4_OPAQUE_LIMIT) {
511 dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
512 return 0;
513 }
514 return 1;
515 }
516
517 static void
518 add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
519 {
520 unsigned int idhashval;
521
522 list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
523 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
524 list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
525 list_add_tail(&clp->cl_lru, &client_lru);
526 clp->cl_time = get_seconds();
527 }
528
529 static void
530 move_to_confirmed(struct nfs4_client *clp)
531 {
532 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
533 unsigned int strhashval;
534
535  	dprintk("NFSD: move_to_confirmed nfs4_client %p\n", clp);
536 list_del_init(&clp->cl_strhash);
537 list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
538 strhashval = clientstr_hashval(clp->cl_recdir);
539 list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
540 renew_client(clp);
541 }
542
543 static struct nfs4_client *
544 find_confirmed_client(clientid_t *clid)
545 {
546 struct nfs4_client *clp;
547 unsigned int idhashval = clientid_hashval(clid->cl_id);
548
549 list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
550 if (same_clid(&clp->cl_clientid, clid))
551 return clp;
552 }
553 return NULL;
554 }
555
556 static struct nfs4_client *
557 find_unconfirmed_client(clientid_t *clid)
558 {
559 struct nfs4_client *clp;
560 unsigned int idhashval = clientid_hashval(clid->cl_id);
561
562 list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
563 if (same_clid(&clp->cl_clientid, clid))
564 return clp;
565 }
566 return NULL;
567 }
568
569 static struct nfs4_client *
570 find_confirmed_client_by_str(const char *dname, unsigned int hashval)
571 {
572 struct nfs4_client *clp;
573
574 list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
575 if (same_name(clp->cl_recdir, dname))
576 return clp;
577 }
578 return NULL;
579 }
580
581 static struct nfs4_client *
582 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
583 {
584 struct nfs4_client *clp;
585
586 list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
587 if (same_name(clp->cl_recdir, dname))
588 return clp;
589 }
590 return NULL;
591 }
592
593 /* a helper function for parse_ipv4 */
594 static int
595 parse_octet(unsigned int *lenp, char **addrp)
596 {
597 unsigned int len = *lenp;
598 char *p = *addrp;
599 int n = -1;
600 char c;
601
602 for (;;) {
603 if (!len)
604 break;
605 len--;
606 c = *p++;
607 if (c == '.')
608 break;
609 if ((c < '0') || (c > '9')) {
610 n = -1;
611 break;
612 }
613 if (n < 0)
614 n = 0;
615 n = (n * 10) + (c - '0');
616 if (n > 255) {
617 n = -1;
618 break;
619 }
620 }
621 *lenp = len;
622 *addrp = p;
623 return n;
624 }
625
626 /* parse and set the setclientid ipv4 callback address */
627 static int
628 parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp)
629 {
630 int temp = 0;
631 u32 cbaddr = 0;
632 u16 cbport = 0;
633 u32 addrlen = addr_len;
634 char *addr = addr_val;
635 int i, shift;
636
637 /* ipaddress */
638 shift = 24;
639 for(i = 4; i > 0 ; i--) {
640 if ((temp = parse_octet(&addrlen, &addr)) < 0) {
641 return 0;
642 }
643 cbaddr |= (temp << shift);
644 if (shift > 0)
645 shift -= 8;
646 }
647 *cbaddrp = cbaddr;
648
649 /* port */
650 shift = 8;
651 for(i = 2; i > 0 ; i--) {
652 if ((temp = parse_octet(&addrlen, &addr)) < 0) {
653 return 0;
654 }
655 cbport |= (temp << shift);
656 if (shift > 0)
657 shift -= 8;
658 }
659 *cbportp = cbport;
660 return 1;
661 }
662
663 static void
664 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
665 {
666 struct nfs4_callback *cb = &clp->cl_callback;
667
668 /* Currently, we only support tcp for the callback channel */
669 if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3))
670 goto out_err;
671
672 if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
673 &cb->cb_addr, &cb->cb_port)))
674 goto out_err;
675 cb->cb_prog = se->se_callback_prog;
676 cb->cb_ident = se->se_callback_ident;
677 return;
678 out_err:
679 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
680 "will not receive delegations\n",
681 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
682
683 return;
684 }
685
686 __be32
687 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
688 struct nfsd4_setclientid *setclid)
689 {
690 struct sockaddr_in *sin = svc_addr_in(rqstp);
691 struct xdr_netobj clname = {
692 .len = setclid->se_namelen,
693 .data = setclid->se_name,
694 };
695 nfs4_verifier clverifier = setclid->se_verf;
696 unsigned int strhashval;
697 struct nfs4_client *conf, *unconf, *new;
698 __be32 status;
699 char dname[HEXDIR_LEN];
700
701 if (!check_name(clname))
702 return nfserr_inval;
703
704 status = nfs4_make_rec_clidname(dname, &clname);
705 if (status)
706 return status;
707
708 /*
709 * XXX The Duplicate Request Cache (DRC) has been checked (??)
710 * We get here on a DRC miss.
711 */
712
713 strhashval = clientstr_hashval(dname);
714
715 nfs4_lock_state();
716 conf = find_confirmed_client_by_str(dname, strhashval);
717 if (conf) {
718 /* RFC 3530 14.2.33 CASE 0: */
719 status = nfserr_clid_inuse;
720 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)
721 || conf->cl_addr != sin->sin_addr.s_addr) {
722 dprintk("NFSD: setclientid: string in use by client"
723  			" at %u.%u.%u.%u\n", NIPQUAD(conf->cl_addr));
724 goto out;
725 }
726 }
727 /*
728 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
729 * has a description of SETCLIENTID request processing consisting
730 * of 5 bullet points, labeled as CASE0 - CASE4 below.
731 */
732 unconf = find_unconfirmed_client_by_str(dname, strhashval);
733 status = nfserr_resource;
734 if (!conf) {
735 /*
736 * RFC 3530 14.2.33 CASE 4:
737 * placed first, because it is the normal case
738 */
739 if (unconf)
740 expire_client(unconf);
741 new = create_client(clname, dname);
742 if (new == NULL)
743 goto out;
744 gen_clid(new);
745 } else if (same_verf(&conf->cl_verifier, &clverifier)) {
746 /*
747 * RFC 3530 14.2.33 CASE 1:
748 * probable callback update
749 */
750 if (unconf) {
751 /* Note this is removing unconfirmed {*x***},
752 * which is stronger than RFC recommended {vxc**}.
753 * This has the advantage that there is at most
754 * one {*x***} in either list at any time.
755 */
756 expire_client(unconf);
757 }
758 new = create_client(clname, dname);
759 if (new == NULL)
760 goto out;
761 copy_clid(new, conf);
762 } else if (!unconf) {
763 /*
764 * RFC 3530 14.2.33 CASE 2:
765 * probable client reboot; state will be removed if
766 * confirmed.
767 */
768 new = create_client(clname, dname);
769 if (new == NULL)
770 goto out;
771 gen_clid(new);
772 } else if (!same_verf(&conf->cl_confirm, &unconf->cl_confirm)) {
773 /*
774 * RFC 3530 14.2.33 CASE 3:
775 * probable client reboot; state will be removed if
776 * confirmed.
777 */
778 expire_client(unconf);
779 new = create_client(clname, dname);
780 if (new == NULL)
781 goto out;
782 gen_clid(new);
783 } else {
784 /* No cases hit !!! */
785 status = nfserr_inval;
786 goto out;
787
788 }
789 copy_verf(new, &clverifier);
790 new->cl_addr = sin->sin_addr.s_addr;
791 copy_cred(&new->cl_cred, &rqstp->rq_cred);
792 gen_confirm(new);
793 gen_callback(new, setclid);
794 add_to_unconfirmed(new, strhashval);
795 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
796 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
797 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
798 status = nfs_ok;
799 out:
800 nfs4_unlock_state();
801 return status;
802 }
803
804
805 /*
806 * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
807 * a description of SETCLIENTID_CONFIRM request processing consisting of 4
808 * bullets, labeled as CASE1 - CASE4 below.
809 */
810 __be32
811 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
812 struct nfsd4_compound_state *cstate,
813 struct nfsd4_setclientid_confirm *setclientid_confirm)
814 {
815 struct sockaddr_in *sin = svc_addr_in(rqstp);
816 struct nfs4_client *conf, *unconf;
817 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
818 clientid_t * clid = &setclientid_confirm->sc_clientid;
819 __be32 status;
820
821 if (STALE_CLIENTID(clid))
822 return nfserr_stale_clientid;
823 /*
824 * XXX The Duplicate Request Cache (DRC) has been checked (??)
825 * We get here on a DRC miss.
826 */
827
828 nfs4_lock_state();
829
830 conf = find_confirmed_client(clid);
831 unconf = find_unconfirmed_client(clid);
832
833 status = nfserr_clid_inuse;
834 if (conf && conf->cl_addr != sin->sin_addr.s_addr)
835 goto out;
836 if (unconf && unconf->cl_addr != sin->sin_addr.s_addr)
837 goto out;
838
839 /*
840 * section 14.2.34 of RFC 3530 has a description of
841 * SETCLIENTID_CONFIRM request processing consisting
842 * of 4 bullet points, labeled as CASE1 - CASE4 below.
843 */
844 if ((conf && unconf) &&
845 (same_verf(&unconf->cl_confirm, &confirm)) &&
846 (same_verf(&conf->cl_verifier, &unconf->cl_verifier)) &&
847 (same_name(conf->cl_recdir,unconf->cl_recdir)) &&
848 (!same_verf(&conf->cl_confirm, &unconf->cl_confirm))) {
849 /*
850 * RFC 3530 14.2.34 CASE 1:
851 * callback update
852 */
853 if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
854 status = nfserr_clid_inuse;
855 else {
856 /* XXX: We just turn off callbacks until we can handle
857  			 * change requests correctly. */
858 atomic_set(&conf->cl_callback.cb_set, 0);
859 gen_confirm(conf);
860 nfsd4_remove_clid_dir(unconf);
861 expire_client(unconf);
862 status = nfs_ok;
863
864 }
865 } else if ((conf && !unconf) ||
866 ((conf && unconf) &&
867 (!same_verf(&conf->cl_verifier, &unconf->cl_verifier) ||
868 !same_name(conf->cl_recdir, unconf->cl_recdir)))) {
869 /*
870 * RFC 3530 14.2.34 CASE 2:
871 * probable retransmitted request; play it safe and
872 * do nothing.
873 */
874 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
875 status = nfserr_clid_inuse;
876 else
877 status = nfs_ok;
878 } else if (!conf && unconf
879 && same_verf(&unconf->cl_confirm, &confirm)) {
880 /*
881 * RFC 3530 14.2.34 CASE 3:
882 * Normal case; new or rebooted client:
883 */
884 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
885 status = nfserr_clid_inuse;
886 } else {
887 unsigned int hash =
888 clientstr_hashval(unconf->cl_recdir);
889 conf = find_confirmed_client_by_str(unconf->cl_recdir,
890 hash);
891 if (conf) {
892 nfsd4_remove_clid_dir(conf);
893 expire_client(conf);
894 }
895 move_to_confirmed(unconf);
896 conf = unconf;
897 nfsd4_probe_callback(conf);
898 status = nfs_ok;
899 }
900 } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
901 && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
902 &confirm)))) {
903 /*
904 * RFC 3530 14.2.34 CASE 4:
905 * Client probably hasn't noticed that we rebooted yet.
906 */
907 status = nfserr_stale_clientid;
908 } else {
909 /* check that we have hit one of the cases...*/
910 status = nfserr_clid_inuse;
911 }
912 out:
913 nfs4_unlock_state();
914 return status;
915 }
916
917 /* OPEN Share state helper functions */
918 static inline struct nfs4_file *
919 alloc_init_file(struct inode *ino)
920 {
921 struct nfs4_file *fp;
922 unsigned int hashval = file_hashval(ino);
923
924 fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
925 if (fp) {
926 kref_init(&fp->fi_ref);
927 INIT_LIST_HEAD(&fp->fi_hash);
928 INIT_LIST_HEAD(&fp->fi_stateids);
929 INIT_LIST_HEAD(&fp->fi_delegations);
930 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
931 fp->fi_inode = igrab(ino);
932 fp->fi_id = current_fileid++;
933 fp->fi_had_conflict = false;
934 return fp;
935 }
936 return NULL;
937 }
938
939 static void
940 nfsd4_free_slab(struct kmem_cache **slab)
941 {
942 if (*slab == NULL)
943 return;
944 kmem_cache_destroy(*slab);
945 *slab = NULL;
946 }
947
948 void
949 nfsd4_free_slabs(void)
950 {
951 nfsd4_free_slab(&stateowner_slab);
952 nfsd4_free_slab(&file_slab);
953 nfsd4_free_slab(&stateid_slab);
954 nfsd4_free_slab(&deleg_slab);
955 }
956
957 static int
958 nfsd4_init_slabs(void)
959 {
960 stateowner_slab = kmem_cache_create("nfsd4_stateowners",
961 sizeof(struct nfs4_stateowner), 0, 0, NULL);
962 if (stateowner_slab == NULL)
963 goto out_nomem;
964 file_slab = kmem_cache_create("nfsd4_files",
965 sizeof(struct nfs4_file), 0, 0, NULL);
966 if (file_slab == NULL)
967 goto out_nomem;
968 stateid_slab = kmem_cache_create("nfsd4_stateids",
969 sizeof(struct nfs4_stateid), 0, 0, NULL);
970 if (stateid_slab == NULL)
971 goto out_nomem;
972 deleg_slab = kmem_cache_create("nfsd4_delegations",
973 sizeof(struct nfs4_delegation), 0, 0, NULL);
974 if (deleg_slab == NULL)
975 goto out_nomem;
976 return 0;
977 out_nomem:
978 nfsd4_free_slabs();
979 dprintk("nfsd4: out of memory while initializing nfsv4\n");
980 return -ENOMEM;
981 }
982
983 void
984 nfs4_free_stateowner(struct kref *kref)
985 {
986 struct nfs4_stateowner *sop =
987 container_of(kref, struct nfs4_stateowner, so_ref);
988 kfree(sop->so_owner.data);
989 kmem_cache_free(stateowner_slab, sop);
990 }
991
992 static inline struct nfs4_stateowner *
993 alloc_stateowner(struct xdr_netobj *owner)
994 {
995 struct nfs4_stateowner *sop;
996
997 if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) {
998 if ((sop->so_owner.data = kmalloc(owner->len, GFP_KERNEL))) {
999 memcpy(sop->so_owner.data, owner->data, owner->len);
1000 sop->so_owner.len = owner->len;
1001 kref_init(&sop->so_ref);
1002 return sop;
1003 }
1004 kmem_cache_free(stateowner_slab, sop);
1005 }
1006 return NULL;
1007 }
1008
1009 static struct nfs4_stateowner *
1010 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
1011 struct nfs4_stateowner *sop;
1012 struct nfs4_replay *rp;
1013 unsigned int idhashval;
1014
1015 if (!(sop = alloc_stateowner(&open->op_owner)))
1016 return NULL;
1017 idhashval = ownerid_hashval(current_ownerid);
1018 INIT_LIST_HEAD(&sop->so_idhash);
1019 INIT_LIST_HEAD(&sop->so_strhash);
1020 INIT_LIST_HEAD(&sop->so_perclient);
1021 INIT_LIST_HEAD(&sop->so_stateids);
1022 INIT_LIST_HEAD(&sop->so_perstateid); /* not used */
1023 INIT_LIST_HEAD(&sop->so_close_lru);
1024 sop->so_time = 0;
1025 list_add(&sop->so_idhash, &ownerid_hashtbl[idhashval]);
1026 list_add(&sop->so_strhash, &ownerstr_hashtbl[strhashval]);
1027 list_add(&sop->so_perclient, &clp->cl_openowners);
1028 sop->so_is_open_owner = 1;
1029 sop->so_id = current_ownerid++;
1030 sop->so_client = clp;
1031 sop->so_seqid = open->op_seqid;
1032 sop->so_confirmed = 0;
1033 rp = &sop->so_replay;
1034 rp->rp_status = nfserr_serverfault;
1035 rp->rp_buflen = 0;
1036 rp->rp_buf = rp->rp_ibuf;
1037 return sop;
1038 }
1039
1040 static void
1041 release_stateid_lockowners(struct nfs4_stateid *open_stp)
1042 {
1043 struct nfs4_stateowner *lock_sop;
1044
1045 while (!list_empty(&open_stp->st_lockowners)) {
1046 lock_sop = list_entry(open_stp->st_lockowners.next,
1047 struct nfs4_stateowner, so_perstateid);
1048 /* list_del(&open_stp->st_lockowners); */
1049 BUG_ON(lock_sop->so_is_open_owner);
1050 release_stateowner(lock_sop);
1051 }
1052 }
1053
1054 static void
1055 unhash_stateowner(struct nfs4_stateowner *sop)
1056 {
1057 struct nfs4_stateid *stp;
1058
1059 list_del(&sop->so_idhash);
1060 list_del(&sop->so_strhash);
1061 if (sop->so_is_open_owner)
1062 list_del(&sop->so_perclient);
1063 list_del(&sop->so_perstateid);
1064 while (!list_empty(&sop->so_stateids)) {
1065 stp = list_entry(sop->so_stateids.next,
1066 struct nfs4_stateid, st_perstateowner);
1067 if (sop->so_is_open_owner)
1068 release_stateid(stp, OPEN_STATE);
1069 else
1070 release_stateid(stp, LOCK_STATE);
1071 }
1072 }
1073
1074 static void
1075 release_stateowner(struct nfs4_stateowner *sop)
1076 {
1077 unhash_stateowner(sop);
1078 list_del(&sop->so_close_lru);
1079 nfs4_put_stateowner(sop);
1080 }
1081
1082 static inline void
1083 init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
1084 struct nfs4_stateowner *sop = open->op_stateowner;
1085 unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
1086
1087 INIT_LIST_HEAD(&stp->st_hash);
1088 INIT_LIST_HEAD(&stp->st_perstateowner);
1089 INIT_LIST_HEAD(&stp->st_lockowners);
1090 INIT_LIST_HEAD(&stp->st_perfile);
1091 list_add(&stp->st_hash, &stateid_hashtbl[hashval]);
1092 list_add(&stp->st_perstateowner, &sop->so_stateids);
1093 list_add(&stp->st_perfile, &fp->fi_stateids);
1094 stp->st_stateowner = sop;
1095 get_nfs4_file(fp);
1096 stp->st_file = fp;
1097 stp->st_stateid.si_boot = boot_time;
1098 stp->st_stateid.si_stateownerid = sop->so_id;
1099 stp->st_stateid.si_fileid = fp->fi_id;
1100 stp->st_stateid.si_generation = 0;
1101 stp->st_access_bmap = 0;
1102 stp->st_deny_bmap = 0;
1103 __set_bit(open->op_share_access, &stp->st_access_bmap);
1104 __set_bit(open->op_share_deny, &stp->st_deny_bmap);
1105 stp->st_openstp = NULL;
1106 }
1107
1108 static void
1109 release_stateid(struct nfs4_stateid *stp, int flags)
1110 {
1111 struct file *filp = stp->st_vfs_file;
1112
1113 list_del(&stp->st_hash);
1114 list_del(&stp->st_perfile);
1115 list_del(&stp->st_perstateowner);
1116 if (flags & OPEN_STATE) {
1117 release_stateid_lockowners(stp);
1118 stp->st_vfs_file = NULL;
1119 nfsd_close(filp);
1120 } else if (flags & LOCK_STATE)
1121 locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner);
1122 put_nfs4_file(stp->st_file);
1123 kmem_cache_free(stateid_slab, stp);
1124 }
1125
1126 static void
1127 move_to_close_lru(struct nfs4_stateowner *sop)
1128 {
1129 dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop);
1130
1131 list_move_tail(&sop->so_close_lru, &close_lru);
1132 sop->so_time = get_seconds();
1133 }
1134
1135 static int
1136 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
1137 clientid_t *clid)
1138 {
1139 return (sop->so_owner.len == owner->len) &&
1140 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
1141 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
1142 }
1143
1144 static struct nfs4_stateowner *
1145 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
1146 {
1147 struct nfs4_stateowner *so = NULL;
1148
1149 list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
1150 if (same_owner_str(so, &open->op_owner, &open->op_clientid))
1151 return so;
1152 }
1153 return NULL;
1154 }
1155
1156 /* search file_hashtbl[] for file */
1157 static struct nfs4_file *
1158 find_file(struct inode *ino)
1159 {
1160 unsigned int hashval = file_hashval(ino);
1161 struct nfs4_file *fp;
1162
1163 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
1164 if (fp->fi_inode == ino) {
1165 get_nfs4_file(fp);
1166 return fp;
1167 }
1168 }
1169 return NULL;
1170 }
1171
1172 static int access_valid(u32 x)
1173 {
1174 return (x > 0 && x < 4);
1175 }
1176
1177 static int deny_valid(u32 x)
1178 {
1179 return (x >= 0 && x < 5);
1180 }
1181
1182 static void
1183 set_access(unsigned int *access, unsigned long bmap) {
1184 int i;
1185
1186 *access = 0;
1187 for (i = 1; i < 4; i++) {
1188 if (test_bit(i, &bmap))
1189 *access |= i;
1190 }
1191 }
1192
1193 static void
1194 set_deny(unsigned int *deny, unsigned long bmap) {
1195 int i;
1196
1197 *deny = 0;
1198 for (i = 0; i < 4; i++) {
1199 if (test_bit(i, &bmap))
1200 *deny |= i ;
1201 }
1202 }
1203
1204 static int
1205 test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
1206 unsigned int access, deny;
1207
1208 set_access(&access, stp->st_access_bmap);
1209 set_deny(&deny, stp->st_deny_bmap);
1210 if ((access & open->op_share_deny) || (deny & open->op_share_access))
1211 return 0;
1212 return 1;
1213 }
1214
1215 /*
1216 * Called to check deny when READ with all zero stateid or
1217 * WRITE with all zero or all one stateid
1218 */
1219 static __be32
1220 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
1221 {
1222 struct inode *ino = current_fh->fh_dentry->d_inode;
1223 struct nfs4_file *fp;
1224 struct nfs4_stateid *stp;
1225 __be32 ret;
1226
1227 dprintk("NFSD: nfs4_share_conflict\n");
1228
1229 fp = find_file(ino);
1230 if (!fp)
1231 return nfs_ok;
1232 ret = nfserr_locked;
1233 /* Search for conflicting share reservations */
1234 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
1235 if (test_bit(deny_type, &stp->st_deny_bmap) ||
1236 test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
1237 goto out;
1238 }
1239 ret = nfs_ok;
1240 out:
1241 put_nfs4_file(fp);
1242 return ret;
1243 }
1244
1245 static inline void
1246 nfs4_file_downgrade(struct file *filp, unsigned int share_access)
1247 {
1248 if (share_access & NFS4_SHARE_ACCESS_WRITE) {
1249 put_write_access(filp->f_path.dentry->d_inode);
1250 filp->f_mode = (filp->f_mode | FMODE_READ) & ~FMODE_WRITE;
1251 }
1252 }
1253
1254 /*
1255 * Recall a delegation
1256 */
1257 static int
1258 do_recall(void *__dp)
1259 {
1260 struct nfs4_delegation *dp = __dp;
1261
1262 dp->dl_file->fi_had_conflict = true;
1263 nfsd4_cb_recall(dp);
1264 return 0;
1265 }
1266
1267 /*
1268 * Spawn a thread to perform a recall on the delegation represented
1269 * by the lease (file_lock)
1270 *
1271 * Called from break_lease() with lock_kernel() held.
1272 * Note: we assume break_lease will only call this *once* for any given
1273 * lease.
1274 */
1275 static
1276 void nfsd_break_deleg_cb(struct file_lock *fl)
1277 {
1278 struct nfs4_delegation *dp= (struct nfs4_delegation *)fl->fl_owner;
1279 struct task_struct *t;
1280
1281 dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
1282 if (!dp)
1283 return;
1284
1285 /* We're assuming the state code never drops its reference
1286 * without first removing the lease. Since we're in this lease
1287 * callback (and since the lease code is serialized by the kernel
1288  	 * lock) we know the server hasn't removed the lease yet, so we know
1289 * it's safe to take a reference: */
1290 atomic_inc(&dp->dl_count);
1291 atomic_inc(&dp->dl_client->cl_count);
1292
1293 spin_lock(&recall_lock);
1294 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
1295 spin_unlock(&recall_lock);
1296
1297  	/* Only place dl_time is set; protected by lock_kernel. */
1298 dp->dl_time = get_seconds();
1299
1300 /*
1301  	 * We don't want the locks code to time out the lease for us;
1302  	 * we'll remove it ourselves if the delegation isn't returned
1303 * in time.
1304 */
1305 fl->fl_break_time = 0;
1306
1307 t = kthread_run(do_recall, dp, "%s", "nfs4_cb_recall");
1308 if (IS_ERR(t)) {
1309 struct nfs4_client *clp = dp->dl_client;
1310
1311  		printk(KERN_INFO "NFSD: Callback thread failed "
1312 "for client (clientid %08x/%08x)\n",
1313 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1314 put_nfs4_client(dp->dl_client);
1315 nfs4_put_delegation(dp);
1316 }
1317 }
1318
1319 /*
1320  * The file_lock is being reaped.
1321 *
1322 * Called by locks_free_lock() with lock_kernel() held.
1323 */
1324 static
1325 void nfsd_release_deleg_cb(struct file_lock *fl)
1326 {
1327 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
1328
1329 dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));
1330
1331 if (!(fl->fl_flags & FL_LEASE) || !dp)
1332 return;
1333 dp->dl_flock = NULL;
1334 }
1335
1336 /*
1337 * Set the delegation file_lock back pointer.
1338 *
1339 * Called from setlease() with lock_kernel() held.
1340 */
1341 static
1342 void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl)
1343 {
1344 struct nfs4_delegation *dp = (struct nfs4_delegation *)new->fl_owner;
1345
1346 dprintk("NFSD: nfsd_copy_lock_deleg_cb: new fl %p dp %p\n", new, dp);
1347 if (!dp)
1348 return;
1349 dp->dl_flock = new;
1350 }
1351
1352 /*
1353 * Called from setlease() with lock_kernel() held
1354 */
1355 static
1356 int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
1357 {
1358 struct nfs4_delegation *onlistd =
1359 (struct nfs4_delegation *)onlist->fl_owner;
1360 struct nfs4_delegation *tryd =
1361 (struct nfs4_delegation *)try->fl_owner;
1362
1363 if (onlist->fl_lmops != try->fl_lmops)
1364 return 0;
1365
1366 return onlistd->dl_client == tryd->dl_client;
1367 }
1368
1369
1370 static
1371 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
1372 {
1373 if (arg & F_UNLCK)
1374 return lease_modify(onlist, arg);
1375 else
1376 return -EAGAIN;
1377 }
1378
1379 static struct lock_manager_operations nfsd_lease_mng_ops = {
1380 .fl_break = nfsd_break_deleg_cb,
1381 .fl_release_private = nfsd_release_deleg_cb,
1382 .fl_copy_lock = nfsd_copy_lock_deleg_cb,
1383 .fl_mylease = nfsd_same_client_deleg_cb,
1384 .fl_change = nfsd_change_deleg_cb,
1385 };
1386
1387
1388 __be32
1389 nfsd4_process_open1(struct nfsd4_open *open)
1390 {
1391 clientid_t *clientid = &open->op_clientid;
1392 struct nfs4_client *clp = NULL;
1393 unsigned int strhashval;
1394 struct nfs4_stateowner *sop = NULL;
1395
1396 if (!check_name(open->op_owner))
1397 return nfserr_inval;
1398
1399 if (STALE_CLIENTID(&open->op_clientid))
1400 return nfserr_stale_clientid;
1401
1402 strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner);
1403 sop = find_openstateowner_str(strhashval, open);
1404 open->op_stateowner = sop;
1405 if (!sop) {
1406 /* Make sure the client's lease hasn't expired. */
1407 clp = find_confirmed_client(clientid);
1408 if (clp == NULL)
1409 return nfserr_expired;
1410 goto renew;
1411 }
1412 if (!sop->so_confirmed) {
1413 /* Replace unconfirmed owners without checking for replay. */
1414 clp = sop->so_client;
1415 release_stateowner(sop);
1416 open->op_stateowner = NULL;
1417 goto renew;
1418 }
1419 if (open->op_seqid == sop->so_seqid - 1) {
1420 if (sop->so_replay.rp_buflen)
1421 return nfserr_replay_me;
1422 /* The original OPEN failed so spectacularly
1423 * that we don't even have replay data saved!
1424 * Therefore, we have no choice but to continue
1425 * processing this OPEN; presumably, we'll
1426 * fail again for the same reason.
1427 */
1428 dprintk("nfsd4_process_open1: replay with no replay cache\n");
1429 goto renew;
1430 }
1431 if (open->op_seqid != sop->so_seqid)
1432 return nfserr_bad_seqid;
1433 renew:
1434 if (open->op_stateowner == NULL) {
1435 sop = alloc_init_open_stateowner(strhashval, clp, open);
1436 if (sop == NULL)
1437 return nfserr_resource;
1438 open->op_stateowner = sop;
1439 }
1440 list_del_init(&sop->so_close_lru);
1441 renew_client(sop->so_client);
1442 return nfs_ok;
1443 }
1444
1445 static inline __be32
1446 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
1447 {
1448 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
1449 return nfserr_openmode;
1450 else
1451 return nfs_ok;
1452 }
1453
1454 static struct nfs4_delegation *
1455 find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
1456 {
1457 struct nfs4_delegation *dp;
1458
1459 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) {
1460 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid)
1461 return dp;
1462 }
1463 return NULL;
1464 }
1465
1466 static __be32
1467 nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
1468 struct nfs4_delegation **dp)
1469 {
1470 int flags;
1471 __be32 status = nfserr_bad_stateid;
1472
1473 *dp = find_delegation_file(fp, &open->op_delegate_stateid);
1474 if (*dp == NULL)
1475 goto out;
1476 flags = open->op_share_access == NFS4_SHARE_ACCESS_READ ?
1477 RD_STATE : WR_STATE;
1478 status = nfs4_check_delegmode(*dp, flags);
1479 if (status)
1480 *dp = NULL;
1481 out:
1482 if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
1483 return nfs_ok;
1484 if (status)
1485 return status;
1486 open->op_stateowner->so_confirmed = 1;
1487 return nfs_ok;
1488 }
1489
1490 static __be32
1491 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_stateid **stpp)
1492 {
1493 struct nfs4_stateid *local;
1494 __be32 status = nfserr_share_denied;
1495 struct nfs4_stateowner *sop = open->op_stateowner;
1496
1497 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
1498 /* ignore lock owners */
1499 if (local->st_stateowner->so_is_open_owner == 0)
1500 continue;
1501 /* remember if we have seen this open owner */
1502 if (local->st_stateowner == sop)
1503 *stpp = local;
1504 /* check for conflicting share reservations */
1505 if (!test_share(local, open))
1506 goto out;
1507 }
1508 status = 0;
1509 out:
1510 return status;
1511 }
1512
1513 static inline struct nfs4_stateid *
1514 nfs4_alloc_stateid(void)
1515 {
1516 return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
1517 }
1518
1519 static __be32
1520 nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_stateid **stpp,
1521 struct nfs4_delegation *dp,
1522 struct svc_fh *cur_fh, int flags)
1523 {
1524 struct nfs4_stateid *stp;
1525
1526 stp = nfs4_alloc_stateid();
1527 if (stp == NULL)
1528 return nfserr_resource;
1529
1530 if (dp) {
1531 get_file(dp->dl_vfs_file);
1532 stp->st_vfs_file = dp->dl_vfs_file;
1533 } else {
1534 __be32 status;
1535 status = nfsd_open(rqstp, cur_fh, S_IFREG, flags,
1536 &stp->st_vfs_file);
1537 if (status) {
1538 if (status == nfserr_dropit)
1539 status = nfserr_jukebox;
1540 kmem_cache_free(stateid_slab, stp);
1541 return status;
1542 }
1543 }
1544 *stpp = stp;
1545 return 0;
1546 }
1547
1548 static inline __be32
1549 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
1550 struct nfsd4_open *open)
1551 {
1552 struct iattr iattr = {
1553 .ia_valid = ATTR_SIZE,
1554 .ia_size = 0,
1555 };
1556 if (!open->op_truncate)
1557 return 0;
1558 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
1559 return nfserr_inval;
1560 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
1561 }
1562
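/*
 * An OPEN by an owner that already holds a stateid for the file is an
 * upgrade: take write access on the inode if the request adds it, apply
 * any requested truncate, then fold the new access/deny bits into the
 * existing stateid.
 */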
1563 static __be32
1564 nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
1565 {
1566 struct file *filp = stp->st_vfs_file;
1567 struct inode *inode = filp->f_path.dentry->d_inode;
1568 unsigned int share_access, new_writer;
1569 __be32 status;
1570
1571 set_access(&share_access, stp->st_access_bmap);
1572 new_writer = (~share_access) & open->op_share_access
1573 & NFS4_SHARE_ACCESS_WRITE;
1574
1575 if (new_writer) {
1576 int err = get_write_access(inode);
1577 if (err)
1578 return nfserrno(err);
1579 }
1580 status = nfsd4_truncate(rqstp, cur_fh, open);
1581 if (status) {
1582 if (new_writer)
1583 put_write_access(inode);
1584 return status;
1585 }
1586 /* remember the open */
1587 filp->f_mode |= open->op_share_access;
1588 set_bit(open->op_share_access, &stp->st_access_bmap);
1589 set_bit(open->op_share_deny, &stp->st_deny_bmap);
1590
1591 return nfs_ok;
1592 }
1593
1594
1595 static void
1596 nfs4_set_claim_prev(struct nfsd4_open *open)
1597 {
1598 open->op_stateowner->so_confirmed = 1;
1599 open->op_stateowner->so_client->cl_firststate = 1;
1600 }
1601
1602 /*
1603 * Attempt to hand out a delegation.
1604 */
1605 static void
1606 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_stateid *stp)
1607 {
1608 struct nfs4_delegation *dp;
1609 struct nfs4_stateowner *sop = stp->st_stateowner;
1610 struct nfs4_callback *cb = &sop->so_client->cl_callback;
1611 struct file_lock fl, *flp = &fl;
1612 int status, flag = 0;
1613
1614 flag = NFS4_OPEN_DELEGATE_NONE;
1615 open->op_recall = 0;
1616 switch (open->op_claim_type) {
1617 case NFS4_OPEN_CLAIM_PREVIOUS:
1618 if (!atomic_read(&cb->cb_set))
1619 open->op_recall = 1;
1620 flag = open->op_delegate_type;
1621 if (flag == NFS4_OPEN_DELEGATE_NONE)
1622 goto out;
1623 break;
1624 case NFS4_OPEN_CLAIM_NULL:
1625 /* Let's not give out any delegations till everyone's
1626 * had the chance to reclaim theirs.... */
1627 if (nfs4_in_grace())
1628 goto out;
1629 if (!atomic_read(&cb->cb_set) || !sop->so_confirmed)
1630 goto out;
1631 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
1632 flag = NFS4_OPEN_DELEGATE_WRITE;
1633 else
1634 flag = NFS4_OPEN_DELEGATE_READ;
1635 break;
1636 default:
1637 goto out;
1638 }
1639
1640 dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
1641 if (dp == NULL) {
1642 flag = NFS4_OPEN_DELEGATE_NONE;
1643 goto out;
1644 }
1645 locks_init_lock(&fl);
1646 fl.fl_lmops = &nfsd_lease_mng_ops;
1647 fl.fl_flags = FL_LEASE;
1648 fl.fl_end = OFFSET_MAX;
1649 fl.fl_owner = (fl_owner_t)dp;
1650 fl.fl_file = stp->st_vfs_file;
1651 fl.fl_pid = current->tgid;
1652
1653 /* vfs_setlease checks to see if delegation should be handed out.
1654 * the lock_manager callbacks fl_mylease and fl_change are used
1655 */
1656 if ((status = vfs_setlease(stp->st_vfs_file,
1657 flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK, &flp))) {
1658 dprintk("NFSD: setlease failed [%d], no delegation\n", status);
1659 unhash_delegation(dp);
1660 flag = NFS4_OPEN_DELEGATE_NONE;
1661 goto out;
1662 }
1663
1664 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
1665
1666 dprintk("NFSD: delegation stateid=(%08x/%08x/%08x/%08x)\n\n",
1667 dp->dl_stateid.si_boot,
1668 dp->dl_stateid.si_stateownerid,
1669 dp->dl_stateid.si_fileid,
1670 dp->dl_stateid.si_generation);
1671 out:
1672 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
1673 && flag == NFS4_OPEN_DELEGATE_NONE
1674 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
1675 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
1676 open->op_delegate_type = flag;
1677 }
1678
1679 /*
1680 * called with nfs4_lock_state() held.
1681 */
1682 __be32
1683 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
1684 {
1685 struct nfs4_file *fp = NULL;
1686 struct inode *ino = current_fh->fh_dentry->d_inode;
1687 struct nfs4_stateid *stp = NULL;
1688 struct nfs4_delegation *dp = NULL;
1689 __be32 status;
1690
1691 status = nfserr_inval;
1692 if (!access_valid(open->op_share_access)
1693 || !deny_valid(open->op_share_deny))
1694 goto out;
1695 /*
1696 * Lookup file; if found, lookup stateid and check open request,
1697 * and check for delegations in the process of being recalled.
1698 * If not found, create the nfs4_file struct
1699 */
1700 fp = find_file(ino);
1701 if (fp) {
1702 if ((status = nfs4_check_open(fp, open, &stp)))
1703 goto out;
1704 status = nfs4_check_deleg(fp, open, &dp);
1705 if (status)
1706 goto out;
1707 } else {
1708 status = nfserr_bad_stateid;
1709 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
1710 goto out;
1711 status = nfserr_resource;
1712 fp = alloc_init_file(ino);
1713 if (fp == NULL)
1714 goto out;
1715 }
1716
1717 /*
1718 * OPEN the file, or upgrade an existing OPEN.
1719 * If truncate fails, the OPEN fails.
1720 */
1721 if (stp) {
1722 /* Stateid was found, this is an OPEN upgrade */
1723 status = nfs4_upgrade_open(rqstp, current_fh, stp, open);
1724 if (status)
1725 goto out;
1726 update_stateid(&stp->st_stateid);
1727 } else {
1728 /* Stateid was not found, this is a new OPEN */
1729 int flags = 0;
1730 if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
1731 flags |= MAY_READ;
1732 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
1733 flags |= MAY_WRITE;
1734 status = nfs4_new_open(rqstp, &stp, dp, current_fh, flags);
1735 if (status)
1736 goto out;
1737 init_stateid(stp, fp, open);
1738 status = nfsd4_truncate(rqstp, current_fh, open);
1739 if (status) {
1740 release_stateid(stp, OPEN_STATE);
1741 goto out;
1742 }
1743 }
1744 memcpy(&open->op_stateid, &stp->st_stateid, sizeof(stateid_t));
1745
1746 /*
1747 * Attempt to hand out a delegation. No error return, because the
1748 * OPEN succeeds even if we fail.
1749 */
1750 nfs4_open_delegation(current_fh, open, stp);
1751
1752 status = nfs_ok;
1753
1754 dprintk("nfs4_process_open2: stateid=(%08x/%08x/%08x/%08x)\n",
1755 stp->st_stateid.si_boot, stp->st_stateid.si_stateownerid,
1756 stp->st_stateid.si_fileid, stp->st_stateid.si_generation);
1757 out:
1758 if (fp)
1759 put_nfs4_file(fp);
1760 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
1761 nfs4_set_claim_prev(open);
1762 /*
1763 * To finish the open response, we just need to set the rflags.
1764 */
1765 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
1766 if (!open->op_stateowner->so_confirmed)
1767 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
1768
1769 return status;
1770 }
1771
1772 static struct workqueue_struct *laundry_wq;
1773 static void laundromat_main(struct work_struct *);
1774 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
1775
1776 __be32
1777 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1778 clientid_t *clid)
1779 {
1780 struct nfs4_client *clp;
1781 __be32 status;
1782
1783 nfs4_lock_state();
1784 dprintk("process_renew(%08x/%08x): starting\n",
1785 clid->cl_boot, clid->cl_id);
1786 status = nfserr_stale_clientid;
1787 if (STALE_CLIENTID(clid))
1788 goto out;
1789 clp = find_confirmed_client(clid);
1790 status = nfserr_expired;
1791 if (clp == NULL) {
1792 /* We assume the client took too long to RENEW. */
1793 dprintk("nfsd4_renew: clientid not found!\n");
1794 goto out;
1795 }
1796 renew_client(clp);
1797 status = nfserr_cb_path_down;
1798 if (!list_empty(&clp->cl_delegations)
1799 && !atomic_read(&clp->cl_callback.cb_set))
1800 goto out;
1801 status = nfs_ok;
1802 out:
1803 nfs4_unlock_state();
1804 return status;
1805 }
1806
1807 static void
1808 end_grace(void)
1809 {
1810 dprintk("NFSD: end of grace period\n");
1811 nfsd4_recdir_purge_old();
1812 in_grace = 0;
1813 }
1814
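/*
 * Walk the client, delegation-recall and close-replay LRU lists, expiring
 * anything whose lease has run out; returns the number of seconds until
 * the laundromat should run again.
 */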
1815 static time_t
1816 nfs4_laundromat(void)
1817 {
1818 struct nfs4_client *clp;
1819 struct nfs4_stateowner *sop;
1820 struct nfs4_delegation *dp;
1821 struct list_head *pos, *next, reaplist;
1822 time_t cutoff = get_seconds() - NFSD_LEASE_TIME;
1823 time_t t, clientid_val = NFSD_LEASE_TIME;
1824 time_t u, test_val = NFSD_LEASE_TIME;
1825
1826 nfs4_lock_state();
1827
1828 dprintk("NFSD: laundromat service - starting\n");
1829 if (in_grace)
1830 end_grace();
1831 list_for_each_safe(pos, next, &client_lru) {
1832 clp = list_entry(pos, struct nfs4_client, cl_lru);
1833 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
1834 t = clp->cl_time - cutoff;
1835 if (clientid_val > t)
1836 clientid_val = t;
1837 break;
1838 }
1839 dprintk("NFSD: purging unused client (clientid %08x)\n",
1840 clp->cl_clientid.cl_id);
1841 nfsd4_remove_clid_dir(clp);
1842 expire_client(clp);
1843 }
1844 INIT_LIST_HEAD(&reaplist);
1845 spin_lock(&recall_lock);
1846 list_for_each_safe(pos, next, &del_recall_lru) {
1847 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
1848 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
1849 u = dp->dl_time - cutoff;
1850 if (test_val > u)
1851 test_val = u;
1852 break;
1853 }
1854 dprintk("NFSD: purging unused delegation dp %p, fp %p\n",
1855 dp, dp->dl_flock);
1856 list_move(&dp->dl_recall_lru, &reaplist);
1857 }
1858 spin_unlock(&recall_lock);
1859 list_for_each_safe(pos, next, &reaplist) {
1860 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
1861 list_del_init(&dp->dl_recall_lru);
1862 unhash_delegation(dp);
1863 }
1864 test_val = NFSD_LEASE_TIME;
1865 list_for_each_safe(pos, next, &close_lru) {
1866 sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
1867 if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
1868 u = sop->so_time - cutoff;
1869 if (test_val > u)
1870 test_val = u;
1871 break;
1872 }
1873 dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
1874 sop->so_id);
1875 release_stateowner(sop);
1876 }
1877 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
1878 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
1879 nfs4_unlock_state();
1880 return clientid_val;
1881 }
1882
1883 void
1884 laundromat_main(struct work_struct *not_used)
1885 {
1886 time_t t;
1887
1888 t = nfs4_laundromat();
1889 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
1890 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
1891 }
1892
1893 static struct nfs4_stateowner *
1894 search_close_lru(u32 st_id, int flags)
1895 {
1896 struct nfs4_stateowner *local = NULL;
1897
1898 if (flags & CLOSE_STATE) {
1899 list_for_each_entry(local, &close_lru, so_close_lru) {
1900 if (local->so_id == st_id)
1901 return local;
1902 }
1903 }
1904 return NULL;
1905 }
1906
1907 static inline int
1908 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
1909 {
1910 return fhp->fh_dentry->d_inode != stp->st_vfs_file->f_path.dentry->d_inode;
1911 }
1912
1913 static int
1914 STALE_STATEID(stateid_t *stateid)
1915 {
1916 if (stateid->si_boot == boot_time)
1917 return 0;
1918 dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
1919 stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
1920 stateid->si_generation);
1921 return 1;
1922 }
1923
1924 static inline int
1925 access_permit_read(unsigned long access_bmap)
1926 {
1927 return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
1928 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
1929 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
1930 }
1931
1932 static inline int
1933 access_permit_write(unsigned long access_bmap)
1934 {
1935 return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
1936 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
1937 }
1938
1939 static
1940 __be32 nfs4_check_openmode(struct nfs4_stateid *stp, int flags)
1941 {
1942 __be32 status = nfserr_openmode;
1943
1944 if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
1945 goto out;
1946 if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
1947 goto out;
1948 status = nfs_ok;
1949 out:
1950 return status;
1951 }
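/*
 * Illustrative example (hypothetical stateid): st_access_bmap records the
 * share-access values granted by OPEN as bit numbers, so an open for
 * NFS4_SHARE_ACCESS_READ (1) sets bit 1.  A WRITE using that stateid is
 * then refused:
 *
 *	unsigned long bmap = 0;
 *	__set_bit(NFS4_SHARE_ACCESS_READ, &bmap);
 *	access_permit_write(bmap);	// 0
 *	access_permit_read(bmap);	// non-zero
 *
 * so nfs4_check_openmode() returns nfserr_openmode for WR_STATE and nfs_ok
 * for RD_STATE on such a stateid.
 */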
1952
1953 static inline __be32
1954 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
1955 {
1956 /* Trying to call delegreturn with a special stateid? Yuch: */
1957 if (!(flags & (RD_STATE | WR_STATE)))
1958 return nfserr_bad_stateid;
1959 else if (ONE_STATEID(stateid) && (flags & RD_STATE))
1960 return nfs_ok;
1961 else if (nfs4_in_grace()) {
1962 * Answer in remaining cases depends on existence of
1963 * conflicting state; so we must wait out the grace period. */
1964 return nfserr_grace;
1965 } else if (flags & WR_STATE)
1966 return nfs4_share_conflict(current_fh,
1967 NFS4_SHARE_DENY_WRITE);
1968 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
1969 return nfs4_share_conflict(current_fh,
1970 NFS4_SHARE_DENY_READ);
1971 }
1972
1973 /*
1974 * Allow READ/WRITE during grace period on recovered state only for files
1975 * that are not able to provide mandatory locking.
1976 */
1977 static inline int
1978 io_during_grace_disallowed(struct inode *inode, int flags)
1979 {
1980 return nfs4_in_grace() && (flags & (RD_STATE | WR_STATE))
1981 && mandatory_lock(inode);
1982 }
1983
1984 /*
1985 * Checks for stateid operations
1986 */
1987 __be32
1988 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int flags, struct file **filpp)
1989 {
1990 struct nfs4_stateid *stp = NULL;
1991 struct nfs4_delegation *dp = NULL;
1992 stateid_t *stidp;
1993 struct inode *ino = current_fh->fh_dentry->d_inode;
1994 __be32 status;
1995
1996 dprintk("NFSD: preprocess_stateid_op: stateid = (%08x/%08x/%08x/%08x)\n",
1997 stateid->si_boot, stateid->si_stateownerid,
1998 stateid->si_fileid, stateid->si_generation);
1999 if (filpp)
2000 *filpp = NULL;
2001
2002 if (io_during_grace_disallowed(ino, flags))
2003 return nfserr_grace;
2004
2005 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
2006 return check_special_stateids(current_fh, stateid, flags);
2007
2008 /* STALE STATEID */
2009 status = nfserr_stale_stateid;
2010 if (STALE_STATEID(stateid))
2011 goto out;
2012
2013 /* BAD STATEID */
2014 status = nfserr_bad_stateid;
2015 if (!stateid->si_fileid) { /* delegation stateid */
2016 if (!(dp = find_delegation_stateid(ino, stateid))) {
2017 dprintk("NFSD: delegation stateid not found\n");
2018 goto out;
2019 }
2020 stidp = &dp->dl_stateid;
2021 } else { /* open or lock stateid */
2022 if (!(stp = find_stateid(stateid, flags))) {
2023 dprintk("NFSD: open or lock stateid not found\n");
2024 goto out;
2025 }
2026 if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp))
2027 goto out;
2028 if (!stp->st_stateowner->so_confirmed)
2029 goto out;
2030 stidp = &stp->st_stateid;
2031 }
2032 if (stateid->si_generation > stidp->si_generation)
2033 goto out;
2034
2035 /* OLD STATEID */
2036 status = nfserr_old_stateid;
2037 if (stateid->si_generation < stidp->si_generation)
2038 goto out;
2039 if (stp) {
2040 if ((status = nfs4_check_openmode(stp,flags)))
2041 goto out;
2042 renew_client(stp->st_stateowner->so_client);
2043 if (filpp)
2044 *filpp = stp->st_vfs_file;
2045 } else if (dp) {
2046 if ((status = nfs4_check_delegmode(dp, flags)))
2047 goto out;
2048 renew_client(dp->dl_client);
2049 if (flags & DELEG_RET)
2050 unhash_delegation(dp);
2051 if (filpp)
2052 *filpp = dp->dl_vfs_file;
2053 }
2054 status = nfs_ok;
2055 out:
2056 return status;
2057 }
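/*
 * Usage sketch (not part of this file; roughly how the compound processing
 * in nfs4proc.c is expected to call this helper): an op that does I/O passes
 * the stateid from the wire plus the kind of access it needs, and gets back
 * the open struct file to use.  For a READ, something like:
 *
 *	status = nfs4_preprocess_stateid_op(&cstate->current_fh,
 *					    &read->rd_stateid,
 *					    CHECK_FH | RD_STATE, &filp);
 *	if (status)
 *		return status;
 *	// filp is stp->st_vfs_file (or dp->dl_vfs_file for a delegation)
 *
 * DELEGRETURN below passes DELEG_RET and a NULL filpp instead.
 */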
2058
2059 static inline int
2060 setlkflg (int type)
2061 {
2062 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
2063 RD_STATE : WR_STATE;
2064 }
2065
2066 /*
2067 * Checks for sequence id mutating operations.
2068 */
2069 static __be32
2070 nfs4_preprocess_seqid_op(struct svc_fh *current_fh, u32 seqid, stateid_t *stateid, int flags, struct nfs4_stateowner **sopp, struct nfs4_stateid **stpp, struct nfsd4_lock *lock)
2071 {
2072 struct nfs4_stateid *stp;
2073 struct nfs4_stateowner *sop;
2074
2075 dprintk("NFSD: preprocess_seqid_op: seqid=%d "
2076 "stateid = (%08x/%08x/%08x/%08x)\n", seqid,
2077 stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
2078 stateid->si_generation);
2079
2080 *stpp = NULL;
2081 *sopp = NULL;
2082
2083 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
2084 dprintk("NFSD: preprocess_seqid_op: magic stateid!\n");
2085 return nfserr_bad_stateid;
2086 }
2087
2088 if (STALE_STATEID(stateid))
2089 return nfserr_stale_stateid;
2090 /*
2091 * We return BAD_STATEID if the filehandle doesn't match the stateid,
2092 * the confirmed flag is incorrectly set, or the generation
2093 * number is incorrect.
2094 */
2095 stp = find_stateid(stateid, flags);
2096 if (stp == NULL) {
2097 /*
2098 * Also, we should make sure this isn't just the result of
2099 * a replayed close:
2100 */
2101 sop = search_close_lru(stateid->si_stateownerid, flags);
2102 if (sop == NULL)
2103 return nfserr_bad_stateid;
2104 *sopp = sop;
2105 goto check_replay;
2106 }
2107
2108 if (lock) {
2109 struct nfs4_stateowner *sop = stp->st_stateowner;
2110 clientid_t *lockclid = &lock->v.new.clientid;
2111 struct nfs4_client *clp = sop->so_client;
2112 int lkflg = 0;
2113 __be32 status;
2114
2115 lkflg = setlkflg(lock->lk_type);
2116
2117 if (lock->lk_is_new) {
2118 if (!sop->so_is_open_owner)
2119 return nfserr_bad_stateid;
2120 if (!same_clid(&clp->cl_clientid, lockclid))
2121 return nfserr_bad_stateid;
2122 /* stp is the open stateid */
2123 status = nfs4_check_openmode(stp, lkflg);
2124 if (status)
2125 return status;
2126 } else {
2127 /* stp is the lock stateid */
2128 status = nfs4_check_openmode(stp->st_openstp, lkflg);
2129 if (status)
2130 return status;
2131 }
2132 }
2133
2134 if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp)) {
2135 dprintk("NFSD: preprocess_seqid_op: fh-stateid mismatch!\n");
2136 return nfserr_bad_stateid;
2137 }
2138
2139 *stpp = stp;
2140 *sopp = sop = stp->st_stateowner;
2141
2142 /*
2143 * We now validate the seqid and stateid generation numbers.
2144 * For the moment, we ignore the possibility of
2145 * generation number wraparound.
2146 */
2147 if (seqid != sop->so_seqid)
2148 goto check_replay;
2149
2150 if (sop->so_confirmed && flags & CONFIRM) {
2151 dprintk("NFSD: preprocess_seqid_op: expected"
2152 " unconfirmed stateowner!\n");
2153 return nfserr_bad_stateid;
2154 }
2155 if (!sop->so_confirmed && !(flags & CONFIRM)) {
2156 dprintk("NFSD: preprocess_seqid_op: stateowner not"
2157 " confirmed yet!\n");
2158 return nfserr_bad_stateid;
2159 }
2160 if (stateid->si_generation > stp->st_stateid.si_generation) {
2161 dprintk("NFSD: preprocess_seqid_op: future stateid?!\n");
2162 return nfserr_bad_stateid;
2163 }
2164
2165 if (stateid->si_generation < stp->st_stateid.si_generation) {
2166 dprintk("NFSD: preprocess_seqid_op: old stateid!\n");
2167 return nfserr_old_stateid;
2168 }
2169 renew_client(sop->so_client);
2170 return nfs_ok;
2171
2172 check_replay:
2173 if (seqid == sop->so_seqid - 1) {
2174 dprintk("NFSD: preprocess_seqid_op: retransmission?\n");
2175 /* indicate replay to calling function */
2176 return nfserr_replay_me;
2177 }
2178 dprintk("NFSD: preprocess_seqid_op: bad seqid (expected %d, got %d)\n",
2179 sop->so_seqid, seqid);
2180 *sopp = NULL;
2181 return nfserr_bad_seqid;
2182 }
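/*
 * Example of the seqid bookkeeping above (hypothetical values): each
 * stateowner expects the next request to carry exactly so_seqid, and the
 * reply to the previous request is cached in so_replay, so a retransmission
 * can be answered without re-executing it:
 *
 *	sop->so_seqid = 7;
 *	// seqid 7 arrives -> processed; so_seqid becomes 8 at encode time
 *	// seqid 7 again   -> 7 == so_seqid - 1: nfserr_replay_me (resend cache)
 *	// seqid 5 arrives -> nfserr_bad_seqid
 */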
2183
2184 __be32
2185 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2186 struct nfsd4_open_confirm *oc)
2187 {
2188 __be32 status;
2189 struct nfs4_stateowner *sop;
2190 struct nfs4_stateid *stp;
2191
2192 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
2193 (int)cstate->current_fh.fh_dentry->d_name.len,
2194 cstate->current_fh.fh_dentry->d_name.name);
2195
2196 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
2197 if (status)
2198 return status;
2199
2200 nfs4_lock_state();
2201
2202 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2203 oc->oc_seqid, &oc->oc_req_stateid,
2204 CHECK_FH | CONFIRM | OPEN_STATE,
2205 &oc->oc_stateowner, &stp, NULL)))
2206 goto out;
2207
2208 sop = oc->oc_stateowner;
2209 sop->so_confirmed = 1;
2210 update_stateid(&stp->st_stateid);
2211 memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t));
2212 dprintk("NFSD: nfsd4_open_confirm: success, seqid=%d "
2213 "stateid=(%08x/%08x/%08x/%08x)\n", oc->oc_seqid,
2214 stp->st_stateid.si_boot,
2215 stp->st_stateid.si_stateownerid,
2216 stp->st_stateid.si_fileid,
2217 stp->st_stateid.si_generation);
2218
2219 nfsd4_create_clid_dir(sop->so_client);
2220 out:
2221 if (oc->oc_stateowner) {
2222 nfs4_get_stateowner(oc->oc_stateowner);
2223 cstate->replay_owner = oc->oc_stateowner;
2224 }
2225 nfs4_unlock_state();
2226 return status;
2227 }
2228
2229
2230 /*
2231 * unset all bits in union bitmap (bmap) that
2232 * do not exist in share (from successful OPEN_DOWNGRADE)
2233 */
2234 static void
2235 reset_union_bmap_access(unsigned long access, unsigned long *bmap)
2236 {
2237 int i;
2238 for (i = 1; i < 4; i++) {
2239 if ((i & access) != i)
2240 __clear_bit(i, bmap);
2241 }
2242 }
2243
2244 static void
2245 reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
2246 {
2247 int i;
2248 for (i = 0; i < 4; i++) {
2249 if ((i & deny) != i)
2250 __clear_bit(i, bmap);
2251 }
2252 }
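/*
 * Worked example for the two helpers above: suppose a stateid has
 * accumulated bits 1 (READ), 2 (WRITE) and 3 (BOTH) in st_access_bmap from
 * earlier opens, and a successful OPEN_DOWNGRADE asks for
 * NFS4_SHARE_ACCESS_READ (1).  For i = 2 and i = 3, (i & 1) != i, so those
 * bits are cleared; bit 1 survives:
 *
 *	reset_union_bmap_access(NFS4_SHARE_ACCESS_READ, &stp->st_access_bmap);
 *	// st_access_bmap: bits {1,2,3} -> {1}
 */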
2253
2254 __be32
2255 nfsd4_open_downgrade(struct svc_rqst *rqstp,
2256 struct nfsd4_compound_state *cstate,
2257 struct nfsd4_open_downgrade *od)
2258 {
2259 __be32 status;
2260 struct nfs4_stateid *stp;
2261 unsigned int share_access;
2262
2263 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
2264 (int)cstate->current_fh.fh_dentry->d_name.len,
2265 cstate->current_fh.fh_dentry->d_name.name);
2266
2267 if (!access_valid(od->od_share_access)
2268 || !deny_valid(od->od_share_deny))
2269 return nfserr_inval;
2270
2271 nfs4_lock_state();
2272 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2273 od->od_seqid,
2274 &od->od_stateid,
2275 CHECK_FH | OPEN_STATE,
2276 &od->od_stateowner, &stp, NULL)))
2277 goto out;
2278
2279 status = nfserr_inval;
2280 if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
2281 dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
2282 stp->st_access_bmap, od->od_share_access);
2283 goto out;
2284 }
2285 if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
2286 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
2287 stp->st_deny_bmap, od->od_share_deny);
2288 goto out;
2289 }
2290 set_access(&share_access, stp->st_access_bmap);
2291 nfs4_file_downgrade(stp->st_vfs_file,
2292 share_access & ~od->od_share_access);
2293
2294 reset_union_bmap_access(od->od_share_access, &stp->st_access_bmap);
2295 reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
2296
2297 update_stateid(&stp->st_stateid);
2298 memcpy(&od->od_stateid, &stp->st_stateid, sizeof(stateid_t));
2299 status = nfs_ok;
2300 out:
2301 if (od->od_stateowner) {
2302 nfs4_get_stateowner(od->od_stateowner);
2303 cstate->replay_owner = od->od_stateowner;
2304 }
2305 nfs4_unlock_state();
2306 return status;
2307 }
2308
2309 /*
2310 * nfs4_unlock_state() called after encode
2311 */
2312 __be32
2313 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2314 struct nfsd4_close *close)
2315 {
2316 __be32 status;
2317 struct nfs4_stateid *stp;
2318
2319 dprintk("NFSD: nfsd4_close on file %.*s\n",
2320 (int)cstate->current_fh.fh_dentry->d_name.len,
2321 cstate->current_fh.fh_dentry->d_name.name);
2322
2323 nfs4_lock_state();
2324 /* check close_lru for replay */
2325 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2326 close->cl_seqid,
2327 &close->cl_stateid,
2328 CHECK_FH | OPEN_STATE | CLOSE_STATE,
2329 &close->cl_stateowner, &stp, NULL)))
2330 goto out;
2331 status = nfs_ok;
2332 update_stateid(&stp->st_stateid);
2333 memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
2334
2335 /* release_stateid() calls nfsd_close() if needed */
2336 release_stateid(stp, OPEN_STATE);
2337
2338 /* place unused nfs4_stateowners on the so_close_lru list to be
2339 * released by the laundromat service after the lease period
2340 * to enable us to handle CLOSE replay
2341 */
2342 if (list_empty(&close->cl_stateowner->so_stateids))
2343 move_to_close_lru(close->cl_stateowner);
2344 out:
2345 if (close->cl_stateowner) {
2346 nfs4_get_stateowner(close->cl_stateowner);
2347 cstate->replay_owner = close->cl_stateowner;
2348 }
2349 nfs4_unlock_state();
2350 return status;
2351 }
2352
2353 __be32
2354 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2355 struct nfsd4_delegreturn *dr)
2356 {
2357 __be32 status;
2358
2359 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
2360 goto out;
2361
2362 nfs4_lock_state();
2363 status = nfs4_preprocess_stateid_op(&cstate->current_fh,
2364 &dr->dr_stateid, DELEG_RET, NULL);
2365 nfs4_unlock_state();
2366 out:
2367 return status;
2368 }
2369
2370
2371 /*
2372 * Lock owner state (byte-range locks)
2373 */
2374 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
2375 #define LOCK_HASH_BITS 8
2376 #define LOCK_HASH_SIZE (1 << LOCK_HASH_BITS)
2377 #define LOCK_HASH_MASK (LOCK_HASH_SIZE - 1)
2378
2379 #define lockownerid_hashval(id) \
2380 ((id) & LOCK_HASH_MASK)
2381
2382 static inline unsigned int
2383 lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
2384 struct xdr_netobj *ownername)
2385 {
2386 return (file_hashval(inode) + cl_id
2387 + opaque_hashval(ownername->data, ownername->len))
2388 & LOCK_HASH_MASK;
2389 }
2390
2391 static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE];
2392 static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
2393 static struct list_head lockstateid_hashtbl[STATEID_HASH_SIZE];
2394
2395 static struct nfs4_stateid *
2396 find_stateid(stateid_t *stid, int flags)
2397 {
2398 struct nfs4_stateid *local = NULL;
2399 u32 st_id = stid->si_stateownerid;
2400 u32 f_id = stid->si_fileid;
2401 unsigned int hashval;
2402
2403 dprintk("NFSD: find_stateid flags 0x%x\n",flags);
2404 if ((flags & LOCK_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) {
2405 hashval = stateid_hashval(st_id, f_id);
2406 list_for_each_entry(local, &lockstateid_hashtbl[hashval], st_hash) {
2407 if ((local->st_stateid.si_stateownerid == st_id) &&
2408 (local->st_stateid.si_fileid == f_id))
2409 return local;
2410 }
2411 }
2412 if ((flags & OPEN_STATE) || (flags & RD_STATE) || (flags & WR_STATE)) {
2413 hashval = stateid_hashval(st_id, f_id);
2414 list_for_each_entry(local, &stateid_hashtbl[hashval], st_hash) {
2415 if ((local->st_stateid.si_stateownerid == st_id) &&
2416 (local->st_stateid.si_fileid == f_id))
2417 return local;
2418 }
2419 }
2420 return NULL;
2421 }
2422
2423 static struct nfs4_delegation *
2424 find_delegation_stateid(struct inode *ino, stateid_t *stid)
2425 {
2426 struct nfs4_file *fp;
2427 struct nfs4_delegation *dl;
2428
2429 dprintk("NFSD:find_delegation_stateid stateid=(%08x/%08x/%08x/%08x)\n",
2430 stid->si_boot, stid->si_stateownerid,
2431 stid->si_fileid, stid->si_generation);
2432
2433 fp = find_file(ino);
2434 if (!fp)
2435 return NULL;
2436 dl = find_delegation_file(fp, stid);
2437 put_nfs4_file(fp);
2438 return dl;
2439 }
2440
2441 /*
2442 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
2443 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
2444 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
2445 * locking, this prevents us from being completely protocol-compliant. The
2446 * real solution to this problem is to start using unsigned file offsets in
2447 * the VFS, but this is a very deep change!
2448 */
2449 static inline void
2450 nfs4_transform_lock_offset(struct file_lock *lock)
2451 {
2452 if (lock->fl_start < 0)
2453 lock->fl_start = OFFSET_MAX;
2454 if (lock->fl_end < 0)
2455 lock->fl_end = OFFSET_MAX;
2456 }
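/*
 * Illustrative example of the clamping above: a LOCK for the whole file
 * (offset 0, length ~(u64)0) produces fl_end = ~(u64)0, which is negative
 * once stored in the signed loff_t fl_end; nfs4_transform_lock_offset()
 * rewrites it as the largest representable offset:
 *
 *	file_lock.fl_start = 0;
 *	file_lock.fl_end = ~(u64)0;		// -1 as an loff_t
 *	nfs4_transform_lock_offset(&file_lock);	// fl_end becomes OFFSET_MAX
 */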
2457
2458 /* Hack!: For now, we're defining this just so we can use a pointer to it
2459 * as a unique cookie to identify our (NFSv4's) posix locks. */
2460 static struct lock_manager_operations nfsd_posix_mng_ops = {
2461 };
2462
2463 static inline void
2464 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
2465 {
2466 struct nfs4_stateowner *sop;
2467 unsigned int hval;
2468
2469 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
2470 sop = (struct nfs4_stateowner *) fl->fl_owner;
2471 hval = lockownerid_hashval(sop->so_id);
2472 kref_get(&sop->so_ref);
2473 deny->ld_sop = sop;
2474 deny->ld_clientid = sop->so_client->cl_clientid;
2475 } else {
2476 deny->ld_sop = NULL;
2477 deny->ld_clientid.cl_boot = 0;
2478 deny->ld_clientid.cl_id = 0;
2479 }
2480 deny->ld_start = fl->fl_start;
2481 deny->ld_length = ~(u64)0;
2482 if (fl->fl_end != ~(u64)0)
2483 deny->ld_length = fl->fl_end - fl->fl_start + 1;
2484 deny->ld_type = NFS4_READ_LT;
2485 if (fl->fl_type != F_RDLCK)
2486 deny->ld_type = NFS4_WRITE_LT;
2487 }
2488
2489 static struct nfs4_stateowner *
2490 find_lockstateowner_str(struct inode *inode, clientid_t *clid,
2491 struct xdr_netobj *owner)
2492 {
2493 unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
2494 struct nfs4_stateowner *op;
2495
2496 list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
2497 if (same_owner_str(op, owner, clid))
2498 return op;
2499 }
2500 return NULL;
2501 }
2502
2503 /*
2504 * Alloc a lock owner structure.
2505 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
2506 * occurred.
2507 *
2508 * strhashval = lock_ownerstr_hashval
2509 */
2510
2511 static struct nfs4_stateowner *
2512 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_stateid *open_stp, struct nfsd4_lock *lock) {
2513 struct nfs4_stateowner *sop;
2514 struct nfs4_replay *rp;
2515 unsigned int idhashval;
2516
2517 if (!(sop = alloc_stateowner(&lock->lk_new_owner)))
2518 return NULL;
2519 idhashval = lockownerid_hashval(current_ownerid);
2520 INIT_LIST_HEAD(&sop->so_idhash);
2521 INIT_LIST_HEAD(&sop->so_strhash);
2522 INIT_LIST_HEAD(&sop->so_perclient);
2523 INIT_LIST_HEAD(&sop->so_stateids);
2524 INIT_LIST_HEAD(&sop->so_perstateid);
2525 INIT_LIST_HEAD(&sop->so_close_lru); /* not used */
2526 sop->so_time = 0;
2527 list_add(&sop->so_idhash, &lock_ownerid_hashtbl[idhashval]);
2528 list_add(&sop->so_strhash, &lock_ownerstr_hashtbl[strhashval]);
2529 list_add(&sop->so_perstateid, &open_stp->st_lockowners);
2530 sop->so_is_open_owner = 0;
2531 sop->so_id = current_ownerid++;
2532 sop->so_client = clp;
2533 /* It is the openowner seqid that will be incremented in encode in the
2534 * case of new lockowners; so increment the lock seqid manually: */
2535 sop->so_seqid = lock->lk_new_lock_seqid + 1;
2536 sop->so_confirmed = 1;
2537 rp = &sop->so_replay;
2538 rp->rp_status = nfserr_serverfault;
2539 rp->rp_buflen = 0;
2540 rp->rp_buf = rp->rp_ibuf;
2541 return sop;
2542 }
2543
2544 static struct nfs4_stateid *
2545 alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struct nfs4_stateid *open_stp)
2546 {
2547 struct nfs4_stateid *stp;
2548 unsigned int hashval = stateid_hashval(sop->so_id, fp->fi_id);
2549
2550 stp = nfs4_alloc_stateid();
2551 if (stp == NULL)
2552 goto out;
2553 INIT_LIST_HEAD(&stp->st_hash);
2554 INIT_LIST_HEAD(&stp->st_perfile);
2555 INIT_LIST_HEAD(&stp->st_perstateowner);
2556 INIT_LIST_HEAD(&stp->st_lockowners); /* not used */
2557 list_add(&stp->st_hash, &lockstateid_hashtbl[hashval]);
2558 list_add(&stp->st_perfile, &fp->fi_stateids);
2559 list_add(&stp->st_perstateowner, &sop->so_stateids);
2560 stp->st_stateowner = sop;
2561 get_nfs4_file(fp);
2562 stp->st_file = fp;
2563 stp->st_stateid.si_boot = boot_time;
2564 stp->st_stateid.si_stateownerid = sop->so_id;
2565 stp->st_stateid.si_fileid = fp->fi_id;
2566 stp->st_stateid.si_generation = 0;
2567 stp->st_vfs_file = open_stp->st_vfs_file; /* FIXME refcount?? */
2568 stp->st_access_bmap = open_stp->st_access_bmap;
2569 stp->st_deny_bmap = open_stp->st_deny_bmap;
2570 stp->st_openstp = open_stp;
2571
2572 out:
2573 return stp;
2574 }
2575
2576 static int
2577 check_lock_length(u64 offset, u64 length)
2578 {
2579 return ((length == 0) || ((length != ~(u64)0) &&
2580 LOFF_OVERFLOW(offset, length)));
2581 }
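/*
 * Examples of the validation above (hypothetical requests): a zero length is
 * never valid, length ~(u64)0 means "to the end of the file" and is always
 * accepted, and anything else must not wrap past the last representable byte:
 *
 *	check_lock_length(0, 0);			// 1: reject, zero length
 *	check_lock_length(100, ~(u64)0);		// 0: lock to EOF
 *	check_lock_length(~(u64)0 - 10, 100);		// 1: reject, offset+length overflows
 */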
2582
2583 /*
2584 * LOCK operation
2585 */
2586 __be32
2587 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2588 struct nfsd4_lock *lock)
2589 {
2590 struct nfs4_stateowner *open_sop = NULL;
2591 struct nfs4_stateowner *lock_sop = NULL;
2592 struct nfs4_stateid *lock_stp;
2593 struct file *filp;
2594 struct file_lock file_lock;
2595 struct file_lock conflock;
2596 __be32 status = 0;
2597 unsigned int strhashval;
2598 unsigned int cmd;
2599 int err;
2600
2601 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
2602 (long long) lock->lk_offset,
2603 (long long) lock->lk_length);
2604
2605 if (check_lock_length(lock->lk_offset, lock->lk_length))
2606 return nfserr_inval;
2607
2608 if ((status = fh_verify(rqstp, &cstate->current_fh,
2609 S_IFREG, MAY_LOCK))) {
2610 dprintk("NFSD: nfsd4_lock: permission denied!\n");
2611 return status;
2612 }
2613
2614 nfs4_lock_state();
2615
2616 if (lock->lk_is_new) {
2617 /*
2618 * Client indicates that this is a new lockowner.
2619 * Use open owner and open stateid to create lock owner and
2620 * lock stateid.
2621 */
2622 struct nfs4_stateid *open_stp = NULL;
2623 struct nfs4_file *fp;
2624
2625 status = nfserr_stale_clientid;
2626 if (STALE_CLIENTID(&lock->lk_new_clientid))
2627 goto out;
2628
2629 /* validate and update open stateid and open seqid */
2630 status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2631 lock->lk_new_open_seqid,
2632 &lock->lk_new_open_stateid,
2633 CHECK_FH | OPEN_STATE,
2634 &lock->lk_replay_owner, &open_stp,
2635 lock);
2636 if (status)
2637 goto out;
2638 open_sop = lock->lk_replay_owner;
2639 /* create lockowner and lock stateid */
2640 fp = open_stp->st_file;
2641 strhashval = lock_ownerstr_hashval(fp->fi_inode,
2642 open_sop->so_client->cl_clientid.cl_id,
2643 &lock->v.new.owner);
2644 /* XXX: Do we need to check for duplicate stateowners on
2645 * the same file, or should they just be allowed (and
2646 * create new stateids)? */
2647 status = nfserr_resource;
2648 lock_sop = alloc_init_lock_stateowner(strhashval,
2649 open_sop->so_client, open_stp, lock);
2650 if (lock_sop == NULL)
2651 goto out;
2652 lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
2653 if (lock_stp == NULL)
2654 goto out;
2655 } else {
2656 /* lock (lock owner + lock stateid) already exists */
2657 status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2658 lock->lk_old_lock_seqid,
2659 &lock->lk_old_lock_stateid,
2660 CHECK_FH | LOCK_STATE,
2661 &lock->lk_replay_owner, &lock_stp, lock);
2662 if (status)
2663 goto out;
2664 lock_sop = lock->lk_replay_owner;
2665 }
2666 /* lock->lk_replay_owner and lock_stp have been created or found */
2667 filp = lock_stp->st_vfs_file;
2668
2669 status = nfserr_grace;
2670 if (nfs4_in_grace() && !lock->lk_reclaim)
2671 goto out;
2672 status = nfserr_no_grace;
2673 if (!nfs4_in_grace() && lock->lk_reclaim)
2674 goto out;
2675
2676 locks_init_lock(&file_lock);
2677 switch (lock->lk_type) {
2678 case NFS4_READ_LT:
2679 case NFS4_READW_LT:
2680 file_lock.fl_type = F_RDLCK;
2681 cmd = F_SETLK;
2682 break;
2683 case NFS4_WRITE_LT:
2684 case NFS4_WRITEW_LT:
2685 file_lock.fl_type = F_WRLCK;
2686 cmd = F_SETLK;
2687 break;
2688 default:
2689 status = nfserr_inval;
2690 goto out;
2691 }
2692 file_lock.fl_owner = (fl_owner_t)lock_sop;
2693 file_lock.fl_pid = current->tgid;
2694 file_lock.fl_file = filp;
2695 file_lock.fl_flags = FL_POSIX;
2696 file_lock.fl_lmops = &nfsd_posix_mng_ops;
2697
2698 file_lock.fl_start = lock->lk_offset;
2699 if ((lock->lk_length == ~(u64)0) ||
2700 LOFF_OVERFLOW(lock->lk_offset, lock->lk_length))
2701 file_lock.fl_end = ~(u64)0;
2702 else
2703 file_lock.fl_end = lock->lk_offset + lock->lk_length - 1;
2704 nfs4_transform_lock_offset(&file_lock);
2705
2706 /*
2707 * Try to lock the file in the VFS.
2708 * Note: locks.c uses the BKL to protect the inode's lock list.
2709 */
2710
2711 /* XXX?: Just to divert the locks_release_private at the start of
2712 * locks_copy_lock: */
2713 locks_init_lock(&conflock);
2714 err = vfs_lock_file(filp, cmd, &file_lock, &conflock);
2715 switch (-err) {
2716 case 0: /* success! */
2717 update_stateid(&lock_stp->st_stateid);
2718 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid,
2719 sizeof(stateid_t));
2720 status = 0;
2721 break;
2722 case (EAGAIN): /* conflock holds conflicting lock */
2723 status = nfserr_denied;
2724 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
2725 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
2726 break;
2727 case (EDEADLK):
2728 status = nfserr_deadlock;
2729 break;
2730 default:
2731 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
2732 status = nfserr_resource;
2733 break;
2734 }
2735 out:
2736 if (status && lock->lk_is_new && lock_sop)
2737 release_stateowner(lock_sop);
2738 if (lock->lk_replay_owner) {
2739 nfs4_get_stateowner(lock->lk_replay_owner);
2740 cstate->replay_owner = lock->lk_replay_owner;
2741 }
2742 nfs4_unlock_state();
2743 return status;
2744 }
2745
2746 /*
2747 * LOCKT operation
2748 */
2749 __be32
2750 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2751 struct nfsd4_lockt *lockt)
2752 {
2753 struct inode *inode;
2754 struct file file;
2755 struct file_lock file_lock;
2756 int error;
2757 __be32 status;
2758
2759 if (nfs4_in_grace())
2760 return nfserr_grace;
2761
2762 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
2763 return nfserr_inval;
2764
2765 lockt->lt_stateowner = NULL;
2766 nfs4_lock_state();
2767
2768 status = nfserr_stale_clientid;
2769 if (STALE_CLIENTID(&lockt->lt_clientid))
2770 goto out;
2771
2772 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) {
2773 dprintk("NFSD: nfsd4_lockt: fh_verify() failed!\n");
2774 if (status == nfserr_symlink)
2775 status = nfserr_inval;
2776 goto out;
2777 }
2778
2779 inode = cstate->current_fh.fh_dentry->d_inode;
2780 locks_init_lock(&file_lock);
2781 switch (lockt->lt_type) {
2782 case NFS4_READ_LT:
2783 case NFS4_READW_LT:
2784 file_lock.fl_type = F_RDLCK;
2785 break;
2786 case NFS4_WRITE_LT:
2787 case NFS4_WRITEW_LT:
2788 file_lock.fl_type = F_WRLCK;
2789 break;
2790 default:
2791 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
2792 status = nfserr_inval;
2793 goto out;
2794 }
2795
2796 lockt->lt_stateowner = find_lockstateowner_str(inode,
2797 &lockt->lt_clientid, &lockt->lt_owner);
2798 if (lockt->lt_stateowner)
2799 file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
2800 file_lock.fl_pid = current->tgid;
2801 file_lock.fl_flags = FL_POSIX;
2802 file_lock.fl_lmops = &nfsd_posix_mng_ops;
2803
2804 file_lock.fl_start = lockt->lt_offset;
2805 if ((lockt->lt_length == ~(u64)0) || LOFF_OVERFLOW(lockt->lt_offset, lockt->lt_length))
2806 file_lock.fl_end = ~(u64)0;
2807 else
2808 file_lock.fl_end = lockt->lt_offset + lockt->lt_length - 1;
2809
2810 nfs4_transform_lock_offset(&file_lock);
2811
2812 /* vfs_test_lock uses the struct file _only_ to resolve the inode.
2813 * Since LOCKT doesn't require an OPEN, and therefore a struct
2814 * file may not exist, pass vfs_test_lock a struct file with
2815 * only the dentry:inode set.
2816 */
2817 memset(&file, 0, sizeof (struct file));
2818 file.f_path.dentry = cstate->current_fh.fh_dentry;
2819
2820 status = nfs_ok;
2821 error = vfs_test_lock(&file, &file_lock);
2822 if (error) {
2823 status = nfserrno(error);
2824 goto out;
2825 }
2826 if (file_lock.fl_type != F_UNLCK) {
2827 status = nfserr_denied;
2828 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
2829 }
2830 out:
2831 nfs4_unlock_state();
2832 return status;
2833 }
2834
2835 __be32
2836 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2837 struct nfsd4_locku *locku)
2838 {
2839 struct nfs4_stateid *stp;
2840 struct file *filp = NULL;
2841 struct file_lock file_lock;
2842 __be32 status;
2843 int err;
2844
2845 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
2846 (long long) locku->lu_offset,
2847 (long long) locku->lu_length);
2848
2849 if (check_lock_length(locku->lu_offset, locku->lu_length))
2850 return nfserr_inval;
2851
2852 nfs4_lock_state();
2853
2854 if ((status = nfs4_preprocess_seqid_op(&cstate->current_fh,
2855 locku->lu_seqid,
2856 &locku->lu_stateid,
2857 CHECK_FH | LOCK_STATE,
2858 &locku->lu_stateowner, &stp, NULL)))
2859 goto out;
2860
2861 filp = stp->st_vfs_file;
2862 BUG_ON(!filp);
2863 locks_init_lock(&file_lock);
2864 file_lock.fl_type = F_UNLCK;
2865 file_lock.fl_owner = (fl_owner_t) locku->lu_stateowner;
2866 file_lock.fl_pid = current->tgid;
2867 file_lock.fl_file = filp;
2868 file_lock.fl_flags = FL_POSIX;
2869 file_lock.fl_lmops = &nfsd_posix_mng_ops;
2870 file_lock.fl_start = locku->lu_offset;
2871
2872 if ((locku->lu_length == ~(u64)0) || LOFF_OVERFLOW(locku->lu_offset, locku->lu_length))
2873 file_lock.fl_end = ~(u64)0;
2874 else
2875 file_lock.fl_end = locku->lu_offset + locku->lu_length - 1;
2876 nfs4_transform_lock_offset(&file_lock);
2877
2878 /*
2879 * Try to unlock the file in the VFS.
2880 */
2881 err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
2882 if (err) {
2883 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
2884 goto out_nfserr;
2885 }
2886 /*
2887 * OK, unlock succeeded; the only thing left to do is update the stateid.
2888 */
2889 update_stateid(&stp->st_stateid);
2890 memcpy(&locku->lu_stateid, &stp->st_stateid, sizeof(stateid_t));
2891
2892 out:
2893 if (locku->lu_stateowner) {
2894 nfs4_get_stateowner(locku->lu_stateowner);
2895 cstate->replay_owner = locku->lu_stateowner;
2896 }
2897 nfs4_unlock_state();
2898 return status;
2899
2900 out_nfserr:
2901 status = nfserrno(err);
2902 goto out;
2903 }
2904
2905 /*
2906 * returns
2907 * 1: locks held by lockowner
2908 * 0: no locks held by lockowner
2909 */
2910 static int
2911 check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
2912 {
2913 struct file_lock **flpp;
2914 struct inode *inode = filp->f_path.dentry->d_inode;
2915 int status = 0;
2916
2917 lock_kernel();
2918 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
2919 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
2920 status = 1;
2921 goto out;
2922 }
2923 }
2924 out:
2925 unlock_kernel();
2926 return status;
2927 }
2928
2929 __be32
2930 nfsd4_release_lockowner(struct svc_rqst *rqstp,
2931 struct nfsd4_compound_state *cstate,
2932 struct nfsd4_release_lockowner *rlockowner)
2933 {
2934 clientid_t *clid = &rlockowner->rl_clientid;
2935 struct nfs4_stateowner *sop;
2936 struct nfs4_stateid *stp;
2937 struct xdr_netobj *owner = &rlockowner->rl_owner;
2938 struct list_head matches;
2939 int i;
2940 __be32 status;
2941
2942 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
2943 clid->cl_boot, clid->cl_id);
2944
2945 /* XXX check for lease expiration */
2946
2947 status = nfserr_stale_clientid;
2948 if (STALE_CLIENTID(clid))
2949 return status;
2950
2951 nfs4_lock_state();
2952
2953 status = nfserr_locks_held;
2954 /* XXX: we're doing a linear search through all the lockowners.
2955 * Yipes! For now we'll just hope clients aren't really using
2956 * release_lockowner much, but eventually we have to fix these
2957 * data structures. */
2958 INIT_LIST_HEAD(&matches);
2959 for (i = 0; i < LOCK_HASH_SIZE; i++) {
2960 list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) {
2961 if (!same_owner_str(sop, owner, clid))
2962 continue;
2963 list_for_each_entry(stp, &sop->so_stateids,
2964 st_perstateowner) {
2965 if (check_for_locks(stp->st_vfs_file, sop))
2966 goto out;
2967 /* Note: so_perclient unused for lockowners,
2968 * so it's OK to fool with here. */
2969 list_add(&sop->so_perclient, &matches);
2970 }
2971 }
2972 }
2973 /* Clients probably won't expect us to return with some (but not all)
2974 * of the lockowner state released; so don't release any until all
2975 * have been checked. */
2976 status = nfs_ok;
2977 while (!list_empty(&matches)) {
2978 sop = list_entry(matches.next, struct nfs4_stateowner,
2979 so_perclient);
2980 /* unhash_stateowner deletes so_perclient only
2981 * for openowners. */
2982 list_del(&sop->so_perclient);
2983 release_stateowner(sop);
2984 }
2985 out:
2986 nfs4_unlock_state();
2987 return status;
2988 }
2989
2990 static inline struct nfs4_client_reclaim *
2991 alloc_reclaim(void)
2992 {
2993 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
2994 }
2995
2996 int
2997 nfs4_has_reclaimed_state(const char *name)
2998 {
2999 unsigned int strhashval = clientstr_hashval(name);
3000 struct nfs4_client *clp;
3001
3002 clp = find_confirmed_client_by_str(name, strhashval);
3003 return clp ? 1 : 0;
3004 }
3005
3006 /*
3007 * failure => all bets are off for reclaim after reboot, nfserr_no_grace...
3008 */
3009 int
3010 nfs4_client_to_reclaim(const char *name)
3011 {
3012 unsigned int strhashval;
3013 struct nfs4_client_reclaim *crp = NULL;
3014
3015 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
3016 crp = alloc_reclaim();
3017 if (!crp)
3018 return 0;
3019 strhashval = clientstr_hashval(name);
3020 INIT_LIST_HEAD(&crp->cr_strhash);
3021 list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
3022 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
3023 reclaim_str_hashtbl_size++;
3024 return 1;
3025 }
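/*
 * Usage sketch (assumed caller; the reboot-recovery scan lives in
 * nfs4recover.c, not in this file): at startup the recovery directory is
 * scanned and each child directory name -- one per client that held state
 * before the reboot -- is recorded, roughly:
 *
 *	nfs4_client_to_reclaim(child->d_name.name);
 *
 * If the allocation here fails, the client is simply not remembered and its
 * later CLAIM_PREVIOUS open will be refused by nfs4_check_open_reclaim().
 */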
3026
3027 static void
3028 nfs4_release_reclaim(void)
3029 {
3030 struct nfs4_client_reclaim *crp = NULL;
3031 int i;
3032
3033 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
3034 while (!list_empty(&reclaim_str_hashtbl[i])) {
3035 crp = list_entry(reclaim_str_hashtbl[i].next,
3036 struct nfs4_client_reclaim, cr_strhash);
3037 list_del(&crp->cr_strhash);
3038 kfree(crp);
3039 reclaim_str_hashtbl_size--;
3040 }
3041 }
3042 BUG_ON(reclaim_str_hashtbl_size);
3043 }
3044
3045 /*
3046 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
3047 static struct nfs4_client_reclaim *
3048 nfs4_find_reclaim_client(clientid_t *clid)
3049 {
3050 unsigned int strhashval;
3051 struct nfs4_client *clp;
3052 struct nfs4_client_reclaim *crp = NULL;
3053
3054
3055 /* find clientid in conf_id_hashtbl */
3056 clp = find_confirmed_client(clid);
3057 if (clp == NULL)
3058 return NULL;
3059
3060 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
3061 clp->cl_name.len, clp->cl_name.data,
3062 clp->cl_recdir);
3063
3064 /* find clp->cl_name in reclaim_str_hashtbl */
3065 strhashval = clientstr_hashval(clp->cl_recdir);
3066 list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
3067 if (same_name(crp->cr_recdir, clp->cl_recdir)) {
3068 return crp;
3069 }
3070 }
3071 return NULL;
3072 }
3073
3074 /*
3075 * Called from OPEN. Look for clientid in reclaim list.
3076 */
3077 __be32
3078 nfs4_check_open_reclaim(clientid_t *clid)
3079 {
3080 return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
3081 }
3082
3083 /* initialization to perform at module load time: */
3084
3085 int
3086 nfs4_state_init(void)
3087 {
3088 int i, status;
3089
3090 status = nfsd4_init_slabs();
3091 if (status)
3092 return status;
3093 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
3094 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
3095 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
3096 INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
3097 INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
3098 }
3099 for (i = 0; i < FILE_HASH_SIZE; i++) {
3100 INIT_LIST_HEAD(&file_hashtbl[i]);
3101 }
3102 for (i = 0; i < OWNER_HASH_SIZE; i++) {
3103 INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
3104 INIT_LIST_HEAD(&ownerid_hashtbl[i]);
3105 }
3106 for (i = 0; i < STATEID_HASH_SIZE; i++) {
3107 INIT_LIST_HEAD(&stateid_hashtbl[i]);
3108 INIT_LIST_HEAD(&lockstateid_hashtbl[i]);
3109 }
3110 for (i = 0; i < LOCK_HASH_SIZE; i++) {
3111 INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
3112 INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
3113 }
3114 memset(&onestateid, ~0, sizeof(stateid_t));
3115 INIT_LIST_HEAD(&close_lru);
3116 INIT_LIST_HEAD(&client_lru);
3117 INIT_LIST_HEAD(&del_recall_lru);
3118 for (i = 0; i < CLIENT_HASH_SIZE; i++)
3119 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
3120 reclaim_str_hashtbl_size = 0;
3121 return 0;
3122 }
3123
3124 static void
3125 nfsd4_load_reboot_recovery_data(void)
3126 {
3127 int status;
3128
3129 nfs4_lock_state();
3130 nfsd4_init_recdir(user_recovery_dirname);
3131 status = nfsd4_recdir_load();
3132 nfs4_unlock_state();
3133 if (status)
3134 printk("NFSD: Failure reading reboot recovery data\n");
3135 }
3136
3137 unsigned long
3138 get_nfs4_grace_period(void)
3139 {
3140 return max(user_lease_time, lease_time) * HZ;
3141 }
3142
3143 /*
3144 * Since the lifetime of a delegation isn't limited to that of an open, a
3145 * client may quite reasonably hang on to a delegation as long as it has
3146 * the inode cached. This becomes an obvious problem the first time a
3147 * client's inode cache approaches the size of the server's total memory.
3148 *
3149 * For now we avoid this problem by imposing a hard limit on the number
3150 * of delegations, which varies according to the server's memory size.
3151 */
3152 static void
3153 set_max_delegations(void)
3154 {
3155 /*
3156 * Allow at most 4 delegations per megabyte of RAM. Quick
3157 * estimates suggest that in the worst case (where every delegation
3158 * is for a different inode), a delegation could take about 1.5K,
3159 * giving a worst case usage of about 6% of memory.
3160 */
3161 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
3162 }
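/*
 * Worked arithmetic for the limit above (assuming 4K pages, PAGE_SHIFT = 12):
 * the shift is 20 - 2 - 12 = 6, i.e. one delegation is allowed per 64 pages
 * (256KB) of the memory counted by nr_free_buffer_pages() -- the "4 per
 * megabyte" mentioned above:
 *
 *	pages = nr_free_buffer_pages();		// say 262144 (1GB of 4K pages)
 *	max_delegations = pages >> 6;		// 4096 delegations
 */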
3163
3164 /* initialization to perform when the nfsd service is started: */
3165
3166 static void
3167 __nfs4_state_start(void)
3168 {
3169 unsigned long grace_time;
3170
3171 boot_time = get_seconds();
3172 grace_time = get_nfs_grace_period();
3173 lease_time = user_lease_time;
3174 in_grace = 1;
3175 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
3176 grace_time/HZ);
3177 laundry_wq = create_singlethread_workqueue("nfsd4");
3178 queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
3179 set_max_delegations();
3180 }
3181
3182 void
3183 nfs4_state_start(void)
3184 {
3185 if (nfs4_init)
3186 return;
3187 nfsd4_load_reboot_recovery_data();
3188 __nfs4_state_start();
3189 nfs4_init = 1;
3190 return;
3191 }
3192
3193 int
3194 nfs4_in_grace(void)
3195 {
3196 return in_grace;
3197 }
3198
3199 time_t
3200 nfs4_lease_time(void)
3201 {
3202 return lease_time;
3203 }
3204
3205 static void
3206 __nfs4_state_shutdown(void)
3207 {
3208 int i;
3209 struct nfs4_client *clp = NULL;
3210 struct nfs4_delegation *dp = NULL;
3211 struct list_head *pos, *next, reaplist;
3212
3213 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
3214 while (!list_empty(&conf_id_hashtbl[i])) {
3215 clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
3216 expire_client(clp);
3217 }
3218 while (!list_empty(&unconf_str_hashtbl[i])) {
3219 clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
3220 expire_client(clp);
3221 }
3222 }
3223 INIT_LIST_HEAD(&reaplist);
3224 spin_lock(&recall_lock);
3225 list_for_each_safe(pos, next, &del_recall_lru) {
3226 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3227 list_move(&dp->dl_recall_lru, &reaplist);
3228 }
3229 spin_unlock(&recall_lock);
3230 list_for_each_safe(pos, next, &reaplist) {
3231 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3232 list_del_init(&dp->dl_recall_lru);
3233 unhash_delegation(dp);
3234 }
3235
3236 nfsd4_shutdown_recdir();
3237 nfs4_init = 0;
3238 }
3239
3240 void
3241 nfs4_state_shutdown(void)
3242 {
3243 cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
3244 destroy_workqueue(laundry_wq);
3245 nfs4_lock_state();
3246 nfs4_release_reclaim();
3247 __nfs4_state_shutdown();
3248 nfs4_unlock_state();
3249 }
3250
3251 static void
3252 nfs4_set_recdir(char *recdir)
3253 {
3254 nfs4_lock_state();
3255 strcpy(user_recovery_dirname, recdir);
3256 nfs4_unlock_state();
3257 }
3258
3259 /*
3260 * Change the NFSv4 recovery directory to recdir.
3261 */
3262 int
3263 nfs4_reset_recoverydir(char *recdir)
3264 {
3265 int status;
3266 struct nameidata nd;
3267
3268 status = path_lookup(recdir, LOOKUP_FOLLOW, &nd);
3269 if (status)
3270 return status;
3271 status = -ENOTDIR;
3272 if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
3273 nfs4_set_recdir(recdir);
3274 status = 0;
3275 }
3276 path_release(&nd);
3277 return status;
3278 }
3279
3280 /*
3281 * Called when leasetime is changed.
3282 *
3283 * The only way the protocol gives us to handle on-the-fly lease changes is to
3284 * simulate a reboot. Instead of doing that, we just wait till the next time
3285 * we start to register any changes in lease time. If the administrator
3286 * really wants to change the lease time *now*, they can go ahead and bring
3287 * nfsd down and then back up again after changing the lease time.
3288 */
3289 void
3290 nfs4_reset_lease(time_t leasetime)
3291 {
3292 lock_kernel();
3293 user_lease_time = leasetime;
3294 unlock_kernel();
3295 }