git.proxmox.com Git - mirror_ubuntu-artful-kernel.git / blob / fs/nfsd/nfs4state.c
commit: locks: convert posix locks to file_lock_context
1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49
50 #include "netns.h"
51
52 #define NFSDDBG_FACILITY NFSDDBG_PROC
53
54 #define all_ones {{~0,~0},~0}
55 static const stateid_t one_stateid = {
56 .si_generation = ~0,
57 .si_opaque = all_ones,
58 };
59 static const stateid_t zero_stateid = {
60 /* all fields zero */
61 };
62 static const stateid_t currentstateid = {
63 .si_generation = 1,
64 };
65
66 static u64 current_sessionid = 1;
67
68 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
69 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
70 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
71
72 /* forward declarations */
73 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
74 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
75
76 /* Locking: */
77
78 /*
79 * Currently used for the del_recall_lru and file hash table. In an
80 * effort to decrease the scope of the client_mutex, this spinlock may
81 * eventually cover more:
82 */
83 static DEFINE_SPINLOCK(state_lock);
84
85 /*
86 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
87 * the refcount on the open stateid to drop.
88 */
89 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
90
91 static struct kmem_cache *openowner_slab;
92 static struct kmem_cache *lockowner_slab;
93 static struct kmem_cache *file_slab;
94 static struct kmem_cache *stateid_slab;
95 static struct kmem_cache *deleg_slab;
96
97 static void free_session(struct nfsd4_session *);
98
99 static struct nfsd4_callback_ops nfsd4_cb_recall_ops;
100
101 static bool is_session_dead(struct nfsd4_session *ses)
102 {
103 return ses->se_flags & NFS4_SESSION_DEAD;
104 }
105
106 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
107 {
108 if (atomic_read(&ses->se_ref) > ref_held_by_me)
109 return nfserr_jukebox;
110 ses->se_flags |= NFS4_SESSION_DEAD;
111 return nfs_ok;
112 }
113
114 static bool is_client_expired(struct nfs4_client *clp)
115 {
116 return clp->cl_time == 0;
117 }
118
119 static __be32 get_client_locked(struct nfs4_client *clp)
120 {
121 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
122
123 lockdep_assert_held(&nn->client_lock);
124
125 if (is_client_expired(clp))
126 return nfserr_expired;
127 atomic_inc(&clp->cl_refcount);
128 return nfs_ok;
129 }
130
131 /* must be called under the client_lock */
132 static inline void
133 renew_client_locked(struct nfs4_client *clp)
134 {
135 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
136
137 if (is_client_expired(clp)) {
138 WARN_ON(1);
139 printk("%s: client (clientid %08x/%08x) already expired\n",
140 __func__,
141 clp->cl_clientid.cl_boot,
142 clp->cl_clientid.cl_id);
143 return;
144 }
145
146 dprintk("renewing client (clientid %08x/%08x)\n",
147 clp->cl_clientid.cl_boot,
148 clp->cl_clientid.cl_id);
149 list_move_tail(&clp->cl_lru, &nn->client_lru);
150 clp->cl_time = get_seconds();
151 }
152
153 static inline void
154 renew_client(struct nfs4_client *clp)
155 {
156 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
157
158 spin_lock(&nn->client_lock);
159 renew_client_locked(clp);
160 spin_unlock(&nn->client_lock);
161 }
162
163 static void put_client_renew_locked(struct nfs4_client *clp)
164 {
165 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
166
167 lockdep_assert_held(&nn->client_lock);
168
169 if (!atomic_dec_and_test(&clp->cl_refcount))
170 return;
171 if (!is_client_expired(clp))
172 renew_client_locked(clp);
173 }
174
175 static void put_client_renew(struct nfs4_client *clp)
176 {
177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
178
179 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
180 return;
181 if (!is_client_expired(clp))
182 renew_client_locked(clp);
183 spin_unlock(&nn->client_lock);
184 }
185
186 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
187 {
188 __be32 status;
189
190 if (is_session_dead(ses))
191 return nfserr_badsession;
192 status = get_client_locked(ses->se_client);
193 if (status)
194 return status;
195 atomic_inc(&ses->se_ref);
196 return nfs_ok;
197 }
198
199 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
200 {
201 struct nfs4_client *clp = ses->se_client;
202 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
203
204 lockdep_assert_held(&nn->client_lock);
205
206 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
207 free_session(ses);
208 put_client_renew_locked(clp);
209 }
210
211 static void nfsd4_put_session(struct nfsd4_session *ses)
212 {
213 struct nfs4_client *clp = ses->se_client;
214 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
215
216 spin_lock(&nn->client_lock);
217 nfsd4_put_session_locked(ses);
218 spin_unlock(&nn->client_lock);
219 }
220
221 static inline struct nfs4_stateowner *
222 nfs4_get_stateowner(struct nfs4_stateowner *sop)
223 {
224 atomic_inc(&sop->so_count);
225 return sop;
226 }
227
228 static int
229 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
230 {
231 return (sop->so_owner.len == owner->len) &&
232 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
233 }
234
235 static struct nfs4_openowner *
236 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
237 struct nfs4_client *clp)
238 {
239 struct nfs4_stateowner *so;
240
241 lockdep_assert_held(&clp->cl_lock);
242
243 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
244 so_strhash) {
245 if (!so->so_is_open_owner)
246 continue;
247 if (same_owner_str(so, &open->op_owner))
248 return openowner(nfs4_get_stateowner(so));
249 }
250 return NULL;
251 }
252
253 static struct nfs4_openowner *
254 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
255 struct nfs4_client *clp)
256 {
257 struct nfs4_openowner *oo;
258
259 spin_lock(&clp->cl_lock);
260 oo = find_openstateowner_str_locked(hashval, open, clp);
261 spin_unlock(&clp->cl_lock);
262 return oo;
263 }
264
265 static inline u32
266 opaque_hashval(const void *ptr, int nbytes)
267 {
268 unsigned char *cptr = (unsigned char *) ptr;
269
270 u32 x = 0;
271 while (nbytes--) {
272 x *= 37;
273 x += *cptr++;
274 }
275 return x;
276 }
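/*
 * Worked example (editor's illustration): hashing the two bytes "ab"
 * gives x = 0*37 + 'a' = 97, then x = 97*37 + 'b' = 3687; callers such
 * as ownerstr_hashval() below then mask the result down to a table
 * index.
 */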
277
278 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
279 {
280 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
281
282 kmem_cache_free(file_slab, fp);
283 }
284
285 static inline void
286 put_nfs4_file(struct nfs4_file *fi)
287 {
288 might_lock(&state_lock);
289
290 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
291 hlist_del_rcu(&fi->fi_hash);
292 spin_unlock(&state_lock);
293 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
294 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
295 }
296 }
297
298 static inline void
299 get_nfs4_file(struct nfs4_file *fi)
300 {
301 atomic_inc(&fi->fi_ref);
302 }
303
304 static struct file *
305 __nfs4_get_fd(struct nfs4_file *f, int oflag)
306 {
307 if (f->fi_fds[oflag])
308 return get_file(f->fi_fds[oflag]);
309 return NULL;
310 }
311
312 static struct file *
313 find_writeable_file_locked(struct nfs4_file *f)
314 {
315 struct file *ret;
316
317 lockdep_assert_held(&f->fi_lock);
318
319 ret = __nfs4_get_fd(f, O_WRONLY);
320 if (!ret)
321 ret = __nfs4_get_fd(f, O_RDWR);
322 return ret;
323 }
324
325 static struct file *
326 find_writeable_file(struct nfs4_file *f)
327 {
328 struct file *ret;
329
330 spin_lock(&f->fi_lock);
331 ret = find_writeable_file_locked(f);
332 spin_unlock(&f->fi_lock);
333
334 return ret;
335 }
336
337 static struct file *find_readable_file_locked(struct nfs4_file *f)
338 {
339 struct file *ret;
340
341 lockdep_assert_held(&f->fi_lock);
342
343 ret = __nfs4_get_fd(f, O_RDONLY);
344 if (!ret)
345 ret = __nfs4_get_fd(f, O_RDWR);
346 return ret;
347 }
348
349 static struct file *
350 find_readable_file(struct nfs4_file *f)
351 {
352 struct file *ret;
353
354 spin_lock(&f->fi_lock);
355 ret = find_readable_file_locked(f);
356 spin_unlock(&f->fi_lock);
357
358 return ret;
359 }
360
361 static struct file *
362 find_any_file(struct nfs4_file *f)
363 {
364 struct file *ret;
365
366 spin_lock(&f->fi_lock);
367 ret = __nfs4_get_fd(f, O_RDWR);
368 if (!ret) {
369 ret = __nfs4_get_fd(f, O_WRONLY);
370 if (!ret)
371 ret = __nfs4_get_fd(f, O_RDONLY);
372 }
373 spin_unlock(&f->fi_lock);
374 return ret;
375 }
376
377 static atomic_long_t num_delegations;
378 unsigned long max_delegations;
379
380 /*
381 * Open owner state (share locks)
382 */
383
384 /* hash tables for lock and open owners */
385 #define OWNER_HASH_BITS 8
386 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
387 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
388
389 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
390 {
391 unsigned int ret;
392
393 ret = opaque_hashval(ownername->data, ownername->len);
394 return ret & OWNER_HASH_MASK;
395 }
396
397 /* hash table for nfs4_file */
398 #define FILE_HASH_BITS 8
399 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
400
401 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
402 {
403 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
404 }
405
406 static unsigned int file_hashval(struct knfsd_fh *fh)
407 {
408 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
409 }
410
411 static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
412 {
413 return fh1->fh_size == fh2->fh_size &&
414 !memcmp(fh1->fh_base.fh_pad,
415 fh2->fh_base.fh_pad,
416 fh1->fh_size);
417 }
418
419 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
420
421 static void
422 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
423 {
424 lockdep_assert_held(&fp->fi_lock);
425
426 if (access & NFS4_SHARE_ACCESS_WRITE)
427 atomic_inc(&fp->fi_access[O_WRONLY]);
428 if (access & NFS4_SHARE_ACCESS_READ)
429 atomic_inc(&fp->fi_access[O_RDONLY]);
430 }
431
432 static __be32
433 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
434 {
435 lockdep_assert_held(&fp->fi_lock);
436
437 /* Does this access mode make sense? */
438 if (access & ~NFS4_SHARE_ACCESS_BOTH)
439 return nfserr_inval;
440
441 /* Does it conflict with a deny mode already set? */
442 if ((access & fp->fi_share_deny) != 0)
443 return nfserr_share_denied;
444
445 __nfs4_file_get_access(fp, access);
446 return nfs_ok;
447 }
448
449 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
450 {
451 /* Common case is that there is no deny mode. */
452 if (deny) {
453 /* Does this deny mode make sense? */
454 if (deny & ~NFS4_SHARE_DENY_BOTH)
455 return nfserr_inval;
456
457 if ((deny & NFS4_SHARE_DENY_READ) &&
458 atomic_read(&fp->fi_access[O_RDONLY]))
459 return nfserr_share_denied;
460
461 if ((deny & NFS4_SHARE_DENY_WRITE) &&
462 atomic_read(&fp->fi_access[O_WRONLY]))
463 return nfserr_share_denied;
464 }
465 return nfs_ok;
466 }
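/*
 * Editor's example of how the two checks above interact: the share
 * access and deny bits use the same values (READ = 1, WRITE = 2), so if
 * an earlier OPEN succeeded with share_deny = NFS4_SHARE_DENY_WRITE,
 * that bit sits in fp->fi_share_deny and a later
 * nfs4_file_get_access(fp, NFS4_SHARE_ACCESS_WRITE) fails with
 * nfserr_share_denied.  Conversely, while fi_access[O_RDONLY] is
 * nonzero, nfs4_file_check_deny(fp, NFS4_SHARE_DENY_READ) fails too.
 */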
467
468 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
469 {
470 might_lock(&fp->fi_lock);
471
472 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
473 struct file *f1 = NULL;
474 struct file *f2 = NULL;
475
476 swap(f1, fp->fi_fds[oflag]);
477 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
478 swap(f2, fp->fi_fds[O_RDWR]);
479 spin_unlock(&fp->fi_lock);
480 if (f1)
481 fput(f1);
482 if (f2)
483 fput(f2);
484 }
485 }
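/*
 * Editor's note on the swap dance above: the struct file pointers are
 * snapshotted into f1/f2 while fi_lock is held, and the fput() calls
 * happen only after the lock is dropped.  Dropping the last reference
 * on one access type closes that fd, and the O_RDWR fd as well once
 * the opposite access count has also reached zero.
 */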
486
487 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
488 {
489 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
490
491 if (access & NFS4_SHARE_ACCESS_WRITE)
492 __nfs4_file_put_access(fp, O_WRONLY);
493 if (access & NFS4_SHARE_ACCESS_READ)
494 __nfs4_file_put_access(fp, O_RDONLY);
495 }
496
497 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
498 struct kmem_cache *slab)
499 {
500 struct nfs4_stid *stid;
501 int new_id;
502
503 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
504 if (!stid)
505 return NULL;
506
507 idr_preload(GFP_KERNEL);
508 spin_lock(&cl->cl_lock);
509 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
510 spin_unlock(&cl->cl_lock);
511 idr_preload_end();
512 if (new_id < 0)
513 goto out_free;
514 stid->sc_client = cl;
515 stid->sc_stateid.si_opaque.so_id = new_id;
516 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
517 /* Will be incremented before return to client: */
518 atomic_set(&stid->sc_count, 1);
519
520 /*
521 * It shouldn't be a problem to reuse an opaque stateid value.
522 * I don't think it is for 4.1. But with 4.0 I worry that, for
523 * example, a stray write retransmission could be accepted by
524 * the server when it should have been rejected. Therefore,
525 * adopt a trick from the sctp code to attempt to maximize the
526 * amount of time until an id is reused, by ensuring they always
527 * "increase" (mod INT_MAX):
528 */
529 return stid;
530 out_free:
531 kmem_cache_free(slab, stid);
532 return NULL;
533 }
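/*
 * Editor's sketch of the idr_alloc_cyclic() behaviour the comment above
 * relies on (illustrative only, not part of this file):
 *
 *	a = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
 *	idr_remove(&cl->cl_stateids, a);
 *	b = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
 *
 * Here b comes out as a + 1 rather than a again: the allocation cursor
 * keeps advancing (wrapping at INT_MAX), so a stale NFSv4.0 stateid
 * keeps failing lookup for as long as possible.
 */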
534
535 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
536 {
537 struct nfs4_stid *stid;
538 struct nfs4_ol_stateid *stp;
539
540 stid = nfs4_alloc_stid(clp, stateid_slab);
541 if (!stid)
542 return NULL;
543
544 stp = openlockstateid(stid);
545 stp->st_stid.sc_free = nfs4_free_ol_stateid;
546 return stp;
547 }
548
549 static void nfs4_free_deleg(struct nfs4_stid *stid)
550 {
551 kmem_cache_free(deleg_slab, stid);
552 atomic_long_dec(&num_delegations);
553 }
554
555 /*
556 * When we recall a delegation, we should be careful not to hand it
557 * out again straight away.
558 * To ensure this we keep a pair of bloom filters ('new' and 'old')
559 * in which the filehandles of recalled delegations are "stored".
560  * If a filehandle appears in either filter, a delegation is blocked.
561 * When a delegation is recalled, the filehandle is stored in the "new"
562 * filter.
563 * Every 30 seconds we swap the filters and clear the "new" one,
564 * unless both are empty of course.
565 *
566  * Each filter is 256 bits. We hash the filehandle to 32 bits and use the
567  * low 3 bytes as bit indices into the filter.
568 *
569 * 'blocked_delegations_lock', which is always taken in block_delegations(),
570 * is used to manage concurrent access. Testing does not need the lock
571 * except when swapping the two filters.
572 */
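/*
 * Worked example (editor's illustration): for a filehandle whose jhash
 * comes out as 0x00a1b2c3, the three bit indices used below are 0xc3,
 * 0xb2 and 0xa1; the filehandle only counts as blocked if all three
 * bits are set in one and the same filter.
 */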
573 static DEFINE_SPINLOCK(blocked_delegations_lock);
574 static struct bloom_pair {
575 int entries, old_entries;
576 time_t swap_time;
577 int new; /* index into 'set' */
578 DECLARE_BITMAP(set[2], 256);
579 } blocked_delegations;
580
581 static int delegation_blocked(struct knfsd_fh *fh)
582 {
583 u32 hash;
584 struct bloom_pair *bd = &blocked_delegations;
585
586 if (bd->entries == 0)
587 return 0;
588 if (seconds_since_boot() - bd->swap_time > 30) {
589 spin_lock(&blocked_delegations_lock);
590 if (seconds_since_boot() - bd->swap_time > 30) {
591 bd->entries -= bd->old_entries;
592 bd->old_entries = bd->entries;
593 memset(bd->set[bd->new], 0,
594 sizeof(bd->set[0]));
595 bd->new = 1-bd->new;
596 bd->swap_time = seconds_since_boot();
597 }
598 spin_unlock(&blocked_delegations_lock);
599 }
600 hash = jhash(&fh->fh_base, fh->fh_size, 0);
601 if (test_bit(hash&255, bd->set[0]) &&
602 test_bit((hash>>8)&255, bd->set[0]) &&
603 test_bit((hash>>16)&255, bd->set[0]))
604 return 1;
605
606 if (test_bit(hash&255, bd->set[1]) &&
607 test_bit((hash>>8)&255, bd->set[1]) &&
608 test_bit((hash>>16)&255, bd->set[1]))
609 return 1;
610
611 return 0;
612 }
613
614 static void block_delegations(struct knfsd_fh *fh)
615 {
616 u32 hash;
617 struct bloom_pair *bd = &blocked_delegations;
618
619 hash = jhash(&fh->fh_base, fh->fh_size, 0);
620
621 spin_lock(&blocked_delegations_lock);
622 __set_bit(hash&255, bd->set[bd->new]);
623 __set_bit((hash>>8)&255, bd->set[bd->new]);
624 __set_bit((hash>>16)&255, bd->set[bd->new]);
625 if (bd->entries == 0)
626 bd->swap_time = seconds_since_boot();
627 bd->entries += 1;
628 spin_unlock(&blocked_delegations_lock);
629 }
630
631 static struct nfs4_delegation *
632 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
633 {
634 struct nfs4_delegation *dp;
635 long n;
636
637 dprintk("NFSD alloc_init_deleg\n");
638 n = atomic_long_inc_return(&num_delegations);
639 if (n < 0 || n > max_delegations)
640 goto out_dec;
641 if (delegation_blocked(&current_fh->fh_handle))
642 goto out_dec;
643 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
644 if (dp == NULL)
645 goto out_dec;
646
647 dp->dl_stid.sc_free = nfs4_free_deleg;
648 /*
649 * delegation seqid's are never incremented. The 4.1 special
650 * meaning of seqid 0 isn't meaningful, really, but let's avoid
651 * 0 anyway just for consistency and use 1:
652 */
653 dp->dl_stid.sc_stateid.si_generation = 1;
654 INIT_LIST_HEAD(&dp->dl_perfile);
655 INIT_LIST_HEAD(&dp->dl_perclnt);
656 INIT_LIST_HEAD(&dp->dl_recall_lru);
657 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
658 dp->dl_retries = 1;
659 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
660 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
661 return dp;
662 out_dec:
663 atomic_long_dec(&num_delegations);
664 return NULL;
665 }
666
667 void
668 nfs4_put_stid(struct nfs4_stid *s)
669 {
670 struct nfs4_file *fp = s->sc_file;
671 struct nfs4_client *clp = s->sc_client;
672
673 might_lock(&clp->cl_lock);
674
675 if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
676 wake_up_all(&close_wq);
677 return;
678 }
679 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
680 spin_unlock(&clp->cl_lock);
681 s->sc_free(s);
682 if (fp)
683 put_nfs4_file(fp);
684 }
685
686 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
687 {
688 struct file *filp = NULL;
689
690 spin_lock(&fp->fi_lock);
691 if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees))
692 swap(filp, fp->fi_deleg_file);
693 spin_unlock(&fp->fi_lock);
694
695 if (filp) {
696 vfs_setlease(filp, F_UNLCK, NULL, NULL);
697 fput(filp);
698 }
699 }
700
701 static void unhash_stid(struct nfs4_stid *s)
702 {
703 s->sc_type = 0;
704 }
705
706 static void
707 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
708 {
709 lockdep_assert_held(&state_lock);
710 lockdep_assert_held(&fp->fi_lock);
711
712 atomic_inc(&dp->dl_stid.sc_count);
713 dp->dl_stid.sc_type = NFS4_DELEG_STID;
714 list_add(&dp->dl_perfile, &fp->fi_delegations);
715 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
716 }
717
718 static void
719 unhash_delegation_locked(struct nfs4_delegation *dp)
720 {
721 struct nfs4_file *fp = dp->dl_stid.sc_file;
722
723 lockdep_assert_held(&state_lock);
724
725 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
726 /* Ensure that deleg break won't try to requeue it */
727 ++dp->dl_time;
728 spin_lock(&fp->fi_lock);
729 list_del_init(&dp->dl_perclnt);
730 list_del_init(&dp->dl_recall_lru);
731 list_del_init(&dp->dl_perfile);
732 spin_unlock(&fp->fi_lock);
733 }
734
735 static void destroy_delegation(struct nfs4_delegation *dp)
736 {
737 spin_lock(&state_lock);
738 unhash_delegation_locked(dp);
739 spin_unlock(&state_lock);
740 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
741 nfs4_put_stid(&dp->dl_stid);
742 }
743
744 static void revoke_delegation(struct nfs4_delegation *dp)
745 {
746 struct nfs4_client *clp = dp->dl_stid.sc_client;
747
748 WARN_ON(!list_empty(&dp->dl_recall_lru));
749
750 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
751
752 if (clp->cl_minorversion == 0)
753 nfs4_put_stid(&dp->dl_stid);
754 else {
755 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
756 spin_lock(&clp->cl_lock);
757 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
758 spin_unlock(&clp->cl_lock);
759 }
760 }
761
762 /*
763 * SETCLIENTID state
764 */
765
766 static unsigned int clientid_hashval(u32 id)
767 {
768 return id & CLIENT_HASH_MASK;
769 }
770
771 static unsigned int clientstr_hashval(const char *name)
772 {
773 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
774 }
775
776 /*
777 * We store the NONE, READ, WRITE, and BOTH bits separately in the
778 * st_{access,deny}_bmap field of the stateid, in order to track not
779 * only what share bits are currently in force, but also what
780 * combinations of share bits previous opens have used. This allows us
781 * to enforce the recommendation of rfc 3530 14.2.19 that the server
782  * return an error if the client attempts to downgrade to a combination
783 * of share bits not explicable by closing some of its previous opens.
784 *
785 * XXX: This enforcement is actually incomplete, since we don't keep
786 * track of access/deny bit combinations; so, e.g., we allow:
787 *
788 * OPEN allow read, deny write
789 * OPEN allow both, deny none
790 * DOWNGRADE allow read, deny none
791 *
792 * which we should reject.
793 */
794 static unsigned int
795 bmap_to_share_mode(unsigned long bmap) {
796 int i;
797 unsigned int access = 0;
798
799 for (i = 1; i < 4; i++) {
800 if (test_bit(i, &bmap))
801 access |= i;
802 }
803 return access;
804 }
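/*
 * Worked example (editor's illustration): after OPENs with share_access
 * READ (1) and BOTH (3), st_access_bmap = (1 << 1) | (1 << 3) = 0x0a,
 * and bmap_to_share_mode(0x0a) returns 1 | 3 = NFS4_SHARE_ACCESS_BOTH.
 */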
805
806 /* set share access for a given stateid */
807 static inline void
808 set_access(u32 access, struct nfs4_ol_stateid *stp)
809 {
810 unsigned char mask = 1 << access;
811
812 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
813 stp->st_access_bmap |= mask;
814 }
815
816 /* clear share access for a given stateid */
817 static inline void
818 clear_access(u32 access, struct nfs4_ol_stateid *stp)
819 {
820 unsigned char mask = 1 << access;
821
822 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
823 stp->st_access_bmap &= ~mask;
824 }
825
826 /* test whether a given stateid has access */
827 static inline bool
828 test_access(u32 access, struct nfs4_ol_stateid *stp)
829 {
830 unsigned char mask = 1 << access;
831
832 return (bool)(stp->st_access_bmap & mask);
833 }
834
835 /* set share deny for a given stateid */
836 static inline void
837 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
838 {
839 unsigned char mask = 1 << deny;
840
841 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
842 stp->st_deny_bmap |= mask;
843 }
844
845 /* clear share deny for a given stateid */
846 static inline void
847 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
848 {
849 unsigned char mask = 1 << deny;
850
851 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
852 stp->st_deny_bmap &= ~mask;
853 }
854
855 /* test whether a given stateid is denying specific access */
856 static inline bool
857 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
858 {
859 unsigned char mask = 1 << deny;
860
861 return (bool)(stp->st_deny_bmap & mask);
862 }
863
864 static int nfs4_access_to_omode(u32 access)
865 {
866 switch (access & NFS4_SHARE_ACCESS_BOTH) {
867 case NFS4_SHARE_ACCESS_READ:
868 return O_RDONLY;
869 case NFS4_SHARE_ACCESS_WRITE:
870 return O_WRONLY;
871 case NFS4_SHARE_ACCESS_BOTH:
872 return O_RDWR;
873 }
874 WARN_ON_ONCE(1);
875 return O_RDONLY;
876 }
877
878 /*
879 * A stateid that had a deny mode associated with it is being released
880 * or downgraded. Recalculate the deny mode on the file.
881 */
882 static void
883 recalculate_deny_mode(struct nfs4_file *fp)
884 {
885 struct nfs4_ol_stateid *stp;
886
887 spin_lock(&fp->fi_lock);
888 fp->fi_share_deny = 0;
889 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
890 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
891 spin_unlock(&fp->fi_lock);
892 }
893
894 static void
895 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
896 {
897 int i;
898 bool change = false;
899
900 for (i = 1; i < 4; i++) {
901 if ((i & deny) != i) {
902 change = true;
903 clear_deny(i, stp);
904 }
905 }
906
907 /* Recalculate per-file deny mode if there was a change */
908 if (change)
909 recalculate_deny_mode(stp->st_stid.sc_file);
910 }
911
912 /* release all access and file references for a given stateid */
913 static void
914 release_all_access(struct nfs4_ol_stateid *stp)
915 {
916 int i;
917 struct nfs4_file *fp = stp->st_stid.sc_file;
918
919 if (fp && stp->st_deny_bmap != 0)
920 recalculate_deny_mode(fp);
921
922 for (i = 1; i < 4; i++) {
923 if (test_access(i, stp))
924 nfs4_file_put_access(stp->st_stid.sc_file, i);
925 clear_access(i, stp);
926 }
927 }
928
929 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
930 {
931 struct nfs4_client *clp = sop->so_client;
932
933 might_lock(&clp->cl_lock);
934
935 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
936 return;
937 sop->so_ops->so_unhash(sop);
938 spin_unlock(&clp->cl_lock);
939 kfree(sop->so_owner.data);
940 sop->so_ops->so_free(sop);
941 }
942
943 static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
944 {
945 struct nfs4_file *fp = stp->st_stid.sc_file;
946
947 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
948
949 spin_lock(&fp->fi_lock);
950 list_del(&stp->st_perfile);
951 spin_unlock(&fp->fi_lock);
952 list_del(&stp->st_perstateowner);
953 }
954
955 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
956 {
957 struct nfs4_ol_stateid *stp = openlockstateid(stid);
958
959 release_all_access(stp);
960 if (stp->st_stateowner)
961 nfs4_put_stateowner(stp->st_stateowner);
962 kmem_cache_free(stateid_slab, stid);
963 }
964
965 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
966 {
967 struct nfs4_ol_stateid *stp = openlockstateid(stid);
968 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
969 struct file *file;
970
971 file = find_any_file(stp->st_stid.sc_file);
972 if (file)
973 filp_close(file, (fl_owner_t)lo);
974 nfs4_free_ol_stateid(stid);
975 }
976
977 /*
978 * Put the persistent reference to an already unhashed generic stateid, while
979 * holding the cl_lock. If it's the last reference, then put it onto the
980 * reaplist for later destruction.
981 */
982 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
983 struct list_head *reaplist)
984 {
985 struct nfs4_stid *s = &stp->st_stid;
986 struct nfs4_client *clp = s->sc_client;
987
988 lockdep_assert_held(&clp->cl_lock);
989
990 WARN_ON_ONCE(!list_empty(&stp->st_locks));
991
992 if (!atomic_dec_and_test(&s->sc_count)) {
993 wake_up_all(&close_wq);
994 return;
995 }
996
997 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
998 list_add(&stp->st_locks, reaplist);
999 }
1000
1001 static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1002 {
1003 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
1004
1005 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
1006
1007 list_del_init(&stp->st_locks);
1008 unhash_ol_stateid(stp);
1009 unhash_stid(&stp->st_stid);
1010 }
1011
1012 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1013 {
1014 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
1015
1016 spin_lock(&oo->oo_owner.so_client->cl_lock);
1017 unhash_lock_stateid(stp);
1018 spin_unlock(&oo->oo_owner.so_client->cl_lock);
1019 nfs4_put_stid(&stp->st_stid);
1020 }
1021
1022 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1023 {
1024 struct nfs4_client *clp = lo->lo_owner.so_client;
1025
1026 lockdep_assert_held(&clp->cl_lock);
1027
1028 list_del_init(&lo->lo_owner.so_strhash);
1029 }
1030
1031 /*
1032 * Free a list of generic stateids that were collected earlier after being
1033 * fully unhashed.
1034 */
1035 static void
1036 free_ol_stateid_reaplist(struct list_head *reaplist)
1037 {
1038 struct nfs4_ol_stateid *stp;
1039 struct nfs4_file *fp;
1040
1041 might_sleep();
1042
1043 while (!list_empty(reaplist)) {
1044 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1045 st_locks);
1046 list_del(&stp->st_locks);
1047 fp = stp->st_stid.sc_file;
1048 stp->st_stid.sc_free(&stp->st_stid);
1049 if (fp)
1050 put_nfs4_file(fp);
1051 }
1052 }
1053
1054 static void release_lockowner(struct nfs4_lockowner *lo)
1055 {
1056 struct nfs4_client *clp = lo->lo_owner.so_client;
1057 struct nfs4_ol_stateid *stp;
1058 struct list_head reaplist;
1059
1060 INIT_LIST_HEAD(&reaplist);
1061
1062 spin_lock(&clp->cl_lock);
1063 unhash_lockowner_locked(lo);
1064 while (!list_empty(&lo->lo_owner.so_stateids)) {
1065 stp = list_first_entry(&lo->lo_owner.so_stateids,
1066 struct nfs4_ol_stateid, st_perstateowner);
1067 unhash_lock_stateid(stp);
1068 put_ol_stateid_locked(stp, &reaplist);
1069 }
1070 spin_unlock(&clp->cl_lock);
1071 free_ol_stateid_reaplist(&reaplist);
1072 nfs4_put_stateowner(&lo->lo_owner);
1073 }
1074
1075 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1076 struct list_head *reaplist)
1077 {
1078 struct nfs4_ol_stateid *stp;
1079
1080 while (!list_empty(&open_stp->st_locks)) {
1081 stp = list_entry(open_stp->st_locks.next,
1082 struct nfs4_ol_stateid, st_locks);
1083 unhash_lock_stateid(stp);
1084 put_ol_stateid_locked(stp, reaplist);
1085 }
1086 }
1087
1088 static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
1089 struct list_head *reaplist)
1090 {
1091 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1092
1093 unhash_ol_stateid(stp);
1094 release_open_stateid_locks(stp, reaplist);
1095 }
1096
1097 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1098 {
1099 LIST_HEAD(reaplist);
1100
1101 spin_lock(&stp->st_stid.sc_client->cl_lock);
1102 unhash_open_stateid(stp, &reaplist);
1103 put_ol_stateid_locked(stp, &reaplist);
1104 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1105 free_ol_stateid_reaplist(&reaplist);
1106 }
1107
1108 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1109 {
1110 struct nfs4_client *clp = oo->oo_owner.so_client;
1111
1112 lockdep_assert_held(&clp->cl_lock);
1113
1114 list_del_init(&oo->oo_owner.so_strhash);
1115 list_del_init(&oo->oo_perclient);
1116 }
1117
1118 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1119 {
1120 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1121 nfsd_net_id);
1122 struct nfs4_ol_stateid *s;
1123
1124 spin_lock(&nn->client_lock);
1125 s = oo->oo_last_closed_stid;
1126 if (s) {
1127 list_del_init(&oo->oo_close_lru);
1128 oo->oo_last_closed_stid = NULL;
1129 }
1130 spin_unlock(&nn->client_lock);
1131 if (s)
1132 nfs4_put_stid(&s->st_stid);
1133 }
1134
1135 static void release_openowner(struct nfs4_openowner *oo)
1136 {
1137 struct nfs4_ol_stateid *stp;
1138 struct nfs4_client *clp = oo->oo_owner.so_client;
1139 struct list_head reaplist;
1140
1141 INIT_LIST_HEAD(&reaplist);
1142
1143 spin_lock(&clp->cl_lock);
1144 unhash_openowner_locked(oo);
1145 while (!list_empty(&oo->oo_owner.so_stateids)) {
1146 stp = list_first_entry(&oo->oo_owner.so_stateids,
1147 struct nfs4_ol_stateid, st_perstateowner);
1148 unhash_open_stateid(stp, &reaplist);
1149 put_ol_stateid_locked(stp, &reaplist);
1150 }
1151 spin_unlock(&clp->cl_lock);
1152 free_ol_stateid_reaplist(&reaplist);
1153 release_last_closed_stateid(oo);
1154 nfs4_put_stateowner(&oo->oo_owner);
1155 }
1156
1157 static inline int
1158 hash_sessionid(struct nfs4_sessionid *sessionid)
1159 {
1160 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1161
1162 return sid->sequence % SESSION_HASH_SIZE;
1163 }
1164
1165 #ifdef NFSD_DEBUG
1166 static inline void
1167 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1168 {
1169 u32 *ptr = (u32 *)(&sessionid->data[0]);
1170 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1171 }
1172 #else
1173 static inline void
1174 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1175 {
1176 }
1177 #endif
1178
1179 /*
1180 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1181 * won't be used for replay.
1182 */
1183 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1184 {
1185 struct nfs4_stateowner *so = cstate->replay_owner;
1186
1187 if (nfserr == nfserr_replay_me)
1188 return;
1189
1190 if (!seqid_mutating_err(ntohl(nfserr))) {
1191 nfsd4_cstate_clear_replay(cstate);
1192 return;
1193 }
1194 if (!so)
1195 return;
1196 if (so->so_is_open_owner)
1197 release_last_closed_stateid(openowner(so));
1198 so->so_seqid++;
1199 return;
1200 }
1201
1202 static void
1203 gen_sessionid(struct nfsd4_session *ses)
1204 {
1205 struct nfs4_client *clp = ses->se_client;
1206 struct nfsd4_sessionid *sid;
1207
1208 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1209 sid->clientid = clp->cl_clientid;
1210 sid->sequence = current_sessionid++;
1211 sid->reserved = 0;
1212 }
1213
1214 /*
1215  * The protocol defines ca_maxresponsesize_cached to include the size of
1216 * the rpc header, but all we need to cache is the data starting after
1217 * the end of the initial SEQUENCE operation--the rest we regenerate
1218  * each time. Therefore we can advertise a ca_maxresponsesize_cached
1219 * value that is the number of bytes in our cache plus a few additional
1220 * bytes. In order to stay on the safe side, and not promise more than
1221 * we can cache, those additional bytes must be the minimum possible: 24
1222 * bytes of rpc header (xid through accept state, with AUTH_NULL
1223 * verifier), 12 for the compound header (with zero-length tag), and 44
1224 * for the SEQUENCE op response:
1225 */
1226 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1227
1228 static void
1229 free_session_slots(struct nfsd4_session *ses)
1230 {
1231 int i;
1232
1233 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
1234 kfree(ses->se_slots[i]);
1235 }
1236
1237 /*
1238 * We don't actually need to cache the rpc and session headers, so we
1239 * can allocate a little less for each slot:
1240 */
1241 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1242 {
1243 u32 size;
1244
1245 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1246 size = 0;
1247 else
1248 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1249 return size + sizeof(struct nfsd4_slot);
1250 }
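/*
 * Worked example (editor's illustration): NFSD_MIN_HDR_SEQ_SZ is
 * 24 + 12 + 44 = 80, so a client that negotiates a
 * ca_maxresponsesize_cached of 1024 bytes gets slots holding
 * 1024 - 80 = 944 bytes of cached reply data each, plus
 * sizeof(struct nfsd4_slot) of bookkeeping.
 */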
1251
1252 /*
1253 * XXX: If we run out of reserved DRC memory we could (up to a point)
1254 * re-negotiate active sessions and reduce their slot usage to make
1255 * room for new connections. For now we just fail the create session.
1256 */
1257 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1258 {
1259 u32 slotsize = slot_bytes(ca);
1260 u32 num = ca->maxreqs;
1261 int avail;
1262
1263 spin_lock(&nfsd_drc_lock);
1264 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1265 nfsd_drc_max_mem - nfsd_drc_mem_used);
1266 num = min_t(int, num, avail / slotsize);
1267 nfsd_drc_mem_used += num * slotsize;
1268 spin_unlock(&nfsd_drc_lock);
1269
1270 return num;
1271 }
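/*
 * Editor's example of the arithmetic above: if a client asks for 64
 * slots of 2k each (after slot_bytes()) while only 64k of DRC memory
 * remains, avail / slotsize caps the grant at 32 slots; the caller can
 * then advertise the reduced maxreqs back to the client in the
 * CREATE_SESSION reply.
 */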
1272
1273 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1274 {
1275 int slotsize = slot_bytes(ca);
1276
1277 spin_lock(&nfsd_drc_lock);
1278 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1279 spin_unlock(&nfsd_drc_lock);
1280 }
1281
1282 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1283 struct nfsd4_channel_attrs *battrs)
1284 {
1285 int numslots = fattrs->maxreqs;
1286 int slotsize = slot_bytes(fattrs);
1287 struct nfsd4_session *new;
1288 int mem, i;
1289
1290 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1291 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1292 mem = numslots * sizeof(struct nfsd4_slot *);
1293
1294 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1295 if (!new)
1296 return NULL;
1297 /* allocate each struct nfsd4_slot and data cache in one piece */
1298 for (i = 0; i < numslots; i++) {
1299 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1300 if (!new->se_slots[i])
1301 goto out_free;
1302 }
1303
1304 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1305 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1306
1307 return new;
1308 out_free:
1309 while (i--)
1310 kfree(new->se_slots[i]);
1311 kfree(new);
1312 return NULL;
1313 }
1314
1315 static void free_conn(struct nfsd4_conn *c)
1316 {
1317 svc_xprt_put(c->cn_xprt);
1318 kfree(c);
1319 }
1320
1321 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1322 {
1323 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1324 struct nfs4_client *clp = c->cn_session->se_client;
1325
1326 spin_lock(&clp->cl_lock);
1327 if (!list_empty(&c->cn_persession)) {
1328 list_del(&c->cn_persession);
1329 free_conn(c);
1330 }
1331 nfsd4_probe_callback(clp);
1332 spin_unlock(&clp->cl_lock);
1333 }
1334
1335 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1336 {
1337 struct nfsd4_conn *conn;
1338
1339 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1340 if (!conn)
1341 return NULL;
1342 svc_xprt_get(rqstp->rq_xprt);
1343 conn->cn_xprt = rqstp->rq_xprt;
1344 conn->cn_flags = flags;
1345 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1346 return conn;
1347 }
1348
1349 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1350 {
1351 conn->cn_session = ses;
1352 list_add(&conn->cn_persession, &ses->se_conns);
1353 }
1354
1355 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1356 {
1357 struct nfs4_client *clp = ses->se_client;
1358
1359 spin_lock(&clp->cl_lock);
1360 __nfsd4_hash_conn(conn, ses);
1361 spin_unlock(&clp->cl_lock);
1362 }
1363
1364 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1365 {
1366 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1367 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1368 }
1369
1370 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1371 {
1372 int ret;
1373
1374 nfsd4_hash_conn(conn, ses);
1375 ret = nfsd4_register_conn(conn);
1376 if (ret)
1377 /* oops; xprt is already down: */
1378 nfsd4_conn_lost(&conn->cn_xpt_user);
1379 /* We may have gained or lost a callback channel: */
1380 nfsd4_probe_callback_sync(ses->se_client);
1381 }
1382
1383 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1384 {
1385 u32 dir = NFS4_CDFC4_FORE;
1386
1387 if (cses->flags & SESSION4_BACK_CHAN)
1388 dir |= NFS4_CDFC4_BACK;
1389 return alloc_conn(rqstp, dir);
1390 }
1391
1392 /* must be called under client_lock */
1393 static void nfsd4_del_conns(struct nfsd4_session *s)
1394 {
1395 struct nfs4_client *clp = s->se_client;
1396 struct nfsd4_conn *c;
1397
1398 spin_lock(&clp->cl_lock);
1399 while (!list_empty(&s->se_conns)) {
1400 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1401 list_del_init(&c->cn_persession);
1402 spin_unlock(&clp->cl_lock);
1403
1404 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1405 free_conn(c);
1406
1407 spin_lock(&clp->cl_lock);
1408 }
1409 spin_unlock(&clp->cl_lock);
1410 }
1411
1412 static void __free_session(struct nfsd4_session *ses)
1413 {
1414 free_session_slots(ses);
1415 kfree(ses);
1416 }
1417
1418 static void free_session(struct nfsd4_session *ses)
1419 {
1420 nfsd4_del_conns(ses);
1421 nfsd4_put_drc_mem(&ses->se_fchannel);
1422 __free_session(ses);
1423 }
1424
1425 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1426 {
1427 int idx;
1428 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1429
1430 new->se_client = clp;
1431 gen_sessionid(new);
1432
1433 INIT_LIST_HEAD(&new->se_conns);
1434
1435 new->se_cb_seq_nr = 1;
1436 new->se_flags = cses->flags;
1437 new->se_cb_prog = cses->callback_prog;
1438 new->se_cb_sec = cses->cb_sec;
1439 atomic_set(&new->se_ref, 0);
1440 idx = hash_sessionid(&new->se_sessionid);
1441 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1442 spin_lock(&clp->cl_lock);
1443 list_add(&new->se_perclnt, &clp->cl_sessions);
1444 spin_unlock(&clp->cl_lock);
1445
1446 {
1447 struct sockaddr *sa = svc_addr(rqstp);
1448 /*
1449 * This is a little silly; with sessions there's no real
1450 * use for the callback address. Use the peer address
1451 * as a reasonable default for now, but consider fixing
1452 * the rpc client not to require an address in the
1453 * future:
1454 */
1455 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1456 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1457 }
1458 }
1459
1460 /* caller must hold client_lock */
1461 static struct nfsd4_session *
1462 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1463 {
1464 struct nfsd4_session *elem;
1465 int idx;
1466 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1467
1468 lockdep_assert_held(&nn->client_lock);
1469
1470 dump_sessionid(__func__, sessionid);
1471 idx = hash_sessionid(sessionid);
1472 /* Search in the appropriate list */
1473 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1474 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1475 NFS4_MAX_SESSIONID_LEN)) {
1476 return elem;
1477 }
1478 }
1479
1480 dprintk("%s: session not found\n", __func__);
1481 return NULL;
1482 }
1483
1484 static struct nfsd4_session *
1485 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1486 __be32 *ret)
1487 {
1488 struct nfsd4_session *session;
1489 __be32 status = nfserr_badsession;
1490
1491 session = __find_in_sessionid_hashtbl(sessionid, net);
1492 if (!session)
1493 goto out;
1494 status = nfsd4_get_session_locked(session);
1495 if (status)
1496 session = NULL;
1497 out:
1498 *ret = status;
1499 return session;
1500 }
1501
1502 /* caller must hold client_lock */
1503 static void
1504 unhash_session(struct nfsd4_session *ses)
1505 {
1506 struct nfs4_client *clp = ses->se_client;
1507 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1508
1509 lockdep_assert_held(&nn->client_lock);
1510
1511 list_del(&ses->se_hash);
1512 spin_lock(&ses->se_client->cl_lock);
1513 list_del(&ses->se_perclnt);
1514 spin_unlock(&ses->se_client->cl_lock);
1515 }
1516
1517 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1518 static int
1519 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1520 {
1521 if (clid->cl_boot == nn->boot_time)
1522 return 0;
1523 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1524 clid->cl_boot, clid->cl_id, nn->boot_time);
1525 return 1;
1526 }
1527
1528 /*
1529  * XXX Should we use a slab cache?
1530 * This type of memory management is somewhat inefficient, but we use it
1531 * anyway since SETCLIENTID is not a common operation.
1532 */
1533 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1534 {
1535 struct nfs4_client *clp;
1536 int i;
1537
1538 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1539 if (clp == NULL)
1540 return NULL;
1541 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1542 if (clp->cl_name.data == NULL)
1543 goto err_no_name;
1544 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1545 OWNER_HASH_SIZE, GFP_KERNEL);
1546 if (!clp->cl_ownerstr_hashtbl)
1547 goto err_no_hashtbl;
1548 for (i = 0; i < OWNER_HASH_SIZE; i++)
1549 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1550 clp->cl_name.len = name.len;
1551 INIT_LIST_HEAD(&clp->cl_sessions);
1552 idr_init(&clp->cl_stateids);
1553 atomic_set(&clp->cl_refcount, 0);
1554 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1555 INIT_LIST_HEAD(&clp->cl_idhash);
1556 INIT_LIST_HEAD(&clp->cl_openowners);
1557 INIT_LIST_HEAD(&clp->cl_delegations);
1558 INIT_LIST_HEAD(&clp->cl_lru);
1559 INIT_LIST_HEAD(&clp->cl_callbacks);
1560 INIT_LIST_HEAD(&clp->cl_revoked);
1561 spin_lock_init(&clp->cl_lock);
1562 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1563 return clp;
1564 err_no_hashtbl:
1565 kfree(clp->cl_name.data);
1566 err_no_name:
1567 kfree(clp);
1568 return NULL;
1569 }
1570
1571 static void
1572 free_client(struct nfs4_client *clp)
1573 {
1574 while (!list_empty(&clp->cl_sessions)) {
1575 struct nfsd4_session *ses;
1576 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1577 se_perclnt);
1578 list_del(&ses->se_perclnt);
1579 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1580 free_session(ses);
1581 }
1582 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1583 free_svc_cred(&clp->cl_cred);
1584 kfree(clp->cl_ownerstr_hashtbl);
1585 kfree(clp->cl_name.data);
1586 idr_destroy(&clp->cl_stateids);
1587 kfree(clp);
1588 }
1589
1590 /* must be called under the client_lock */
1591 static void
1592 unhash_client_locked(struct nfs4_client *clp)
1593 {
1594 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1595 struct nfsd4_session *ses;
1596
1597 lockdep_assert_held(&nn->client_lock);
1598
1599 /* Mark the client as expired! */
1600 clp->cl_time = 0;
1601 /* Make it invisible */
1602 if (!list_empty(&clp->cl_idhash)) {
1603 list_del_init(&clp->cl_idhash);
1604 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1605 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1606 else
1607 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1608 }
1609 list_del_init(&clp->cl_lru);
1610 spin_lock(&clp->cl_lock);
1611 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1612 list_del_init(&ses->se_hash);
1613 spin_unlock(&clp->cl_lock);
1614 }
1615
1616 static void
1617 unhash_client(struct nfs4_client *clp)
1618 {
1619 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1620
1621 spin_lock(&nn->client_lock);
1622 unhash_client_locked(clp);
1623 spin_unlock(&nn->client_lock);
1624 }
1625
1626 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1627 {
1628 if (atomic_read(&clp->cl_refcount))
1629 return nfserr_jukebox;
1630 unhash_client_locked(clp);
1631 return nfs_ok;
1632 }
1633
1634 static void
1635 __destroy_client(struct nfs4_client *clp)
1636 {
1637 struct nfs4_openowner *oo;
1638 struct nfs4_delegation *dp;
1639 struct list_head reaplist;
1640
1641 INIT_LIST_HEAD(&reaplist);
1642 spin_lock(&state_lock);
1643 while (!list_empty(&clp->cl_delegations)) {
1644 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1645 unhash_delegation_locked(dp);
1646 list_add(&dp->dl_recall_lru, &reaplist);
1647 }
1648 spin_unlock(&state_lock);
1649 while (!list_empty(&reaplist)) {
1650 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1651 list_del_init(&dp->dl_recall_lru);
1652 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1653 nfs4_put_stid(&dp->dl_stid);
1654 }
1655 while (!list_empty(&clp->cl_revoked)) {
1656 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1657 list_del_init(&dp->dl_recall_lru);
1658 nfs4_put_stid(&dp->dl_stid);
1659 }
1660 while (!list_empty(&clp->cl_openowners)) {
1661 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1662 nfs4_get_stateowner(&oo->oo_owner);
1663 release_openowner(oo);
1664 }
1665 nfsd4_shutdown_callback(clp);
1666 if (clp->cl_cb_conn.cb_xprt)
1667 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1668 free_client(clp);
1669 }
1670
1671 static void
1672 destroy_client(struct nfs4_client *clp)
1673 {
1674 unhash_client(clp);
1675 __destroy_client(clp);
1676 }
1677
1678 static void expire_client(struct nfs4_client *clp)
1679 {
1680 unhash_client(clp);
1681 nfsd4_client_record_remove(clp);
1682 __destroy_client(clp);
1683 }
1684
1685 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1686 {
1687 memcpy(target->cl_verifier.data, source->data,
1688 sizeof(target->cl_verifier.data));
1689 }
1690
1691 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1692 {
1693 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1694 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1695 }
1696
1697 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1698 {
1699 if (source->cr_principal) {
1700 target->cr_principal =
1701 kstrdup(source->cr_principal, GFP_KERNEL);
1702 if (target->cr_principal == NULL)
1703 return -ENOMEM;
1704 } else
1705 target->cr_principal = NULL;
1706 target->cr_flavor = source->cr_flavor;
1707 target->cr_uid = source->cr_uid;
1708 target->cr_gid = source->cr_gid;
1709 target->cr_group_info = source->cr_group_info;
1710 get_group_info(target->cr_group_info);
1711 target->cr_gss_mech = source->cr_gss_mech;
1712 if (source->cr_gss_mech)
1713 gss_mech_get(source->cr_gss_mech);
1714 return 0;
1715 }
1716
1717 static int
1718 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1719 {
1720 if (o1->len < o2->len)
1721 return -1;
1722 if (o1->len > o2->len)
1723 return 1;
1724 return memcmp(o1->data, o2->data, o1->len);
1725 }
1726
1727 static int same_name(const char *n1, const char *n2)
1728 {
1729 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1730 }
1731
1732 static int
1733 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1734 {
1735 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1736 }
1737
1738 static int
1739 same_clid(clientid_t *cl1, clientid_t *cl2)
1740 {
1741 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1742 }
1743
1744 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1745 {
1746 int i;
1747
1748 if (g1->ngroups != g2->ngroups)
1749 return false;
1750 for (i=0; i<g1->ngroups; i++)
1751 if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1752 return false;
1753 return true;
1754 }
1755
1756 /*
1757 * RFC 3530 language requires clid_inuse be returned when the
1758 * "principal" associated with a requests differs from that previously
1759 * used. We use uid, gid's, and gss principal string as our best
1760 * approximation. We also don't want to allow non-gss use of a client
1761 * established using gss: in theory cr_principal should catch that
1762 * change, but in practice cr_principal can be null even in the gss case
1763 * since gssd doesn't always pass down a principal string.
1764 */
1765 static bool is_gss_cred(struct svc_cred *cr)
1766 {
1767 /* Is cr_flavor one of the gss "pseudoflavors"?: */
1768 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1769 }
1770
1771
1772 static bool
1773 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1774 {
1775 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1776 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1777 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1778 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1779 return false;
1780 if (cr1->cr_principal == cr2->cr_principal)
1781 return true;
1782 if (!cr1->cr_principal || !cr2->cr_principal)
1783 return false;
1784 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1785 }
1786
1787 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1788 {
1789 struct svc_cred *cr = &rqstp->rq_cred;
1790 u32 service;
1791
1792 if (!cr->cr_gss_mech)
1793 return false;
1794 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1795 return service == RPC_GSS_SVC_INTEGRITY ||
1796 service == RPC_GSS_SVC_PRIVACY;
1797 }
1798
1799 static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1800 {
1801 struct svc_cred *cr = &rqstp->rq_cred;
1802
1803 if (!cl->cl_mach_cred)
1804 return true;
1805 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
1806 return false;
1807 if (!svc_rqst_integrity_protected(rqstp))
1808 return false;
1809 if (!cr->cr_principal)
1810 return false;
1811 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1812 }
1813
1814 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
1815 {
1816 __be32 verf[2];
1817
1818 /*
1819 	 * This is opaque to the client, so no need to byte-swap. Use
1820 * __force to keep sparse happy
1821 */
1822 verf[0] = (__force __be32)get_seconds();
1823 verf[1] = (__force __be32)nn->clientid_counter;
1824 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1825 }
1826
1827 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1828 {
1829 clp->cl_clientid.cl_boot = nn->boot_time;
1830 clp->cl_clientid.cl_id = nn->clientid_counter++;
1831 gen_confirm(clp, nn);
1832 }
1833
1834 static struct nfs4_stid *
1835 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1836 {
1837 struct nfs4_stid *ret;
1838
1839 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1840 if (!ret || !ret->sc_type)
1841 return NULL;
1842 return ret;
1843 }
1844
1845 static struct nfs4_stid *
1846 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1847 {
1848 struct nfs4_stid *s;
1849
1850 spin_lock(&cl->cl_lock);
1851 s = find_stateid_locked(cl, t);
1852 if (s != NULL) {
1853 if (typemask & s->sc_type)
1854 atomic_inc(&s->sc_count);
1855 else
1856 s = NULL;
1857 }
1858 spin_unlock(&cl->cl_lock);
1859 return s;
1860 }
1861
1862 static struct nfs4_client *create_client(struct xdr_netobj name,
1863 struct svc_rqst *rqstp, nfs4_verifier *verf)
1864 {
1865 struct nfs4_client *clp;
1866 struct sockaddr *sa = svc_addr(rqstp);
1867 int ret;
1868 struct net *net = SVC_NET(rqstp);
1869
1870 clp = alloc_client(name);
1871 if (clp == NULL)
1872 return NULL;
1873
1874 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1875 if (ret) {
1876 free_client(clp);
1877 return NULL;
1878 }
1879 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
1880 clp->cl_time = get_seconds();
1881 clear_bit(0, &clp->cl_cb_slot_busy);
1882 copy_verf(clp, verf);
1883 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1884 clp->cl_cb_session = NULL;
1885 clp->net = net;
1886 return clp;
1887 }
1888
1889 static void
1890 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
1891 {
1892 struct rb_node **new = &(root->rb_node), *parent = NULL;
1893 struct nfs4_client *clp;
1894
1895 while (*new) {
1896 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
1897 parent = *new;
1898
1899 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
1900 new = &((*new)->rb_left);
1901 else
1902 new = &((*new)->rb_right);
1903 }
1904
1905 rb_link_node(&new_clp->cl_namenode, parent, new);
1906 rb_insert_color(&new_clp->cl_namenode, root);
1907 }
1908
1909 static struct nfs4_client *
1910 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1911 {
1912 int cmp;
1913 struct rb_node *node = root->rb_node;
1914 struct nfs4_client *clp;
1915
1916 while (node) {
1917 clp = rb_entry(node, struct nfs4_client, cl_namenode);
1918 cmp = compare_blob(&clp->cl_name, name);
1919 if (cmp > 0)
1920 node = node->rb_left;
1921 else if (cmp < 0)
1922 node = node->rb_right;
1923 else
1924 return clp;
1925 }
1926 return NULL;
1927 }
1928
1929 static void
1930 add_to_unconfirmed(struct nfs4_client *clp)
1931 {
1932 unsigned int idhashval;
1933 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1934
1935 lockdep_assert_held(&nn->client_lock);
1936
1937 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1938 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1939 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1940 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1941 renew_client_locked(clp);
1942 }
1943
1944 static void
1945 move_to_confirmed(struct nfs4_client *clp)
1946 {
1947 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1948 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1949
1950 lockdep_assert_held(&nn->client_lock);
1951
1952 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1953 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1954 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1955 add_clp_to_name_tree(clp, &nn->conf_name_tree);
1956 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1957 renew_client_locked(clp);
1958 }
1959
1960 static struct nfs4_client *
1961 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1962 {
1963 struct nfs4_client *clp;
1964 unsigned int idhashval = clientid_hashval(clid->cl_id);
1965
1966 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
1967 if (same_clid(&clp->cl_clientid, clid)) {
1968 if ((bool)clp->cl_minorversion != sessions)
1969 return NULL;
1970 renew_client_locked(clp);
1971 return clp;
1972 }
1973 }
1974 return NULL;
1975 }
1976
1977 static struct nfs4_client *
1978 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1979 {
1980 struct list_head *tbl = nn->conf_id_hashtbl;
1981
1982 lockdep_assert_held(&nn->client_lock);
1983 return find_client_in_id_table(tbl, clid, sessions);
1984 }
1985
1986 static struct nfs4_client *
1987 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1988 {
1989 struct list_head *tbl = nn->unconf_id_hashtbl;
1990
1991 lockdep_assert_held(&nn->client_lock);
1992 return find_client_in_id_table(tbl, clid, sessions);
1993 }
1994
1995 static bool clp_used_exchangeid(struct nfs4_client *clp)
1996 {
1997 return clp->cl_exchange_flags != 0;
1998 }
1999
2000 static struct nfs4_client *
2001 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2002 {
2003 lockdep_assert_held(&nn->client_lock);
2004 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2005 }
2006
2007 static struct nfs4_client *
2008 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2009 {
2010 lockdep_assert_held(&nn->client_lock);
2011 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2012 }
2013
2014 static void
2015 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2016 {
2017 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2018 struct sockaddr *sa = svc_addr(rqstp);
2019 u32 scopeid = rpc_get_scope_id(sa);
2020 unsigned short expected_family;
2021
2022 /* Currently, we only support tcp and tcp6 for the callback channel */
2023 if (se->se_callback_netid_len == 3 &&
2024 !memcmp(se->se_callback_netid_val, "tcp", 3))
2025 expected_family = AF_INET;
2026 else if (se->se_callback_netid_len == 4 &&
2027 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2028 expected_family = AF_INET6;
2029 else
2030 goto out_err;
2031
2032 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2033 se->se_callback_addr_len,
2034 (struct sockaddr *)&conn->cb_addr,
2035 sizeof(conn->cb_addr));
2036
2037 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2038 goto out_err;
2039
2040 if (conn->cb_addr.ss_family == AF_INET6)
2041 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2042
2043 conn->cb_prog = se->se_callback_prog;
2044 conn->cb_ident = se->se_callback_ident;
2045 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2046 return;
2047 out_err:
2048 conn->cb_addr.ss_family = AF_UNSPEC;
2049 conn->cb_addrlen = 0;
2050 dprintk("NFSD: this client (clientid %08x/%08x) "
2051 "will not receive delegations\n",
2052 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2053
2054 return;
2055 }
2056
2057 /*
2058 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2059 */
2060 static void
2061 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2062 {
2063 struct xdr_buf *buf = resp->xdr.buf;
2064 struct nfsd4_slot *slot = resp->cstate.slot;
2065 unsigned int base;
2066
2067 dprintk("--> %s slot %p\n", __func__, slot);
2068
2069 slot->sl_opcnt = resp->opcnt;
2070 slot->sl_status = resp->cstate.status;
2071
2072 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2073 if (nfsd4_not_cached(resp)) {
2074 slot->sl_datalen = 0;
2075 return;
2076 }
2077 base = resp->cstate.data_offset;
2078 slot->sl_datalen = buf->len - base;
2079 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2080 WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
2081 return;
2082 }
2083
2084 /*
2085 * Encode the replay sequence operation from the slot values.
2086 * If cachethis is FALSE, encode the uncached-reply error on the next
2087 * operation, which sets resp->p and increments resp->opcnt for
2088 * nfs4svc_encode_compoundres.
2089 *
2090 */
2091 static __be32
2092 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2093 struct nfsd4_compoundres *resp)
2094 {
2095 struct nfsd4_op *op;
2096 struct nfsd4_slot *slot = resp->cstate.slot;
2097
2098 /* Encode the replayed sequence operation */
2099 op = &args->ops[resp->opcnt - 1];
2100 nfsd4_encode_operation(resp, op);
2101
2102 /* Return nfserr_retry_uncached_rep in next operation. */
2103 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
2104 op = &args->ops[resp->opcnt++];
2105 op->status = nfserr_retry_uncached_rep;
2106 nfsd4_encode_operation(resp, op);
2107 }
2108 return op->status;
2109 }
2110
2111 /*
2112 * The sequence operation is not cached because we can use the slot and
2113 * session values.
2114 */
2115 static __be32
2116 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2117 struct nfsd4_sequence *seq)
2118 {
2119 struct nfsd4_slot *slot = resp->cstate.slot;
2120 struct xdr_stream *xdr = &resp->xdr;
2121 __be32 *p;
2122 __be32 status;
2123
2124 dprintk("--> %s slot %p\n", __func__, slot);
2125
2126 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2127 if (status)
2128 return status;
2129
2130 p = xdr_reserve_space(xdr, slot->sl_datalen);
2131 if (!p) {
2132 WARN_ON_ONCE(1);
2133 return nfserr_serverfault;
2134 }
2135 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2136 xdr_commit_encode(xdr);
2137
2138 resp->opcnt = slot->sl_opcnt;
2139 return slot->sl_status;
2140 }
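/*
 * Note the replay path above re-encodes the SEQUENCE op from live slot
 * and session values, then splices in the sl_datalen bytes saved by
 * nfsd4_store_cache_entry() verbatim; restoring resp->opcnt from
 * sl_opcnt keeps the encoder consistent with the cached reply.
 */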
2141
2142 /*
2143 * Set the exchange_id flags returned by the server.
2144 */
2145 static void
2146 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2147 {
2148 /* pNFS is not supported */
2149 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2150
2151 /* Referrals are supported, Migration is not. */
2152 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2153
2154 /* set the wire flags to return to client. */
2155 clid->flags = new->cl_exchange_flags;
2156 }
2157
2158 static bool client_has_state(struct nfs4_client *clp)
2159 {
2160 /*
2161 * Note the clp->cl_openowners check isn't quite right: there's no
2162 * need to count owners without stateids.
2163 *
2164 * Also note we should probably be using this in the 4.0 case too.
2165 */
2166 return !list_empty(&clp->cl_openowners)
2167 || !list_empty(&clp->cl_delegations)
2168 || !list_empty(&clp->cl_sessions);
2169 }
2170
2171 __be32
2172 nfsd4_exchange_id(struct svc_rqst *rqstp,
2173 struct nfsd4_compound_state *cstate,
2174 struct nfsd4_exchange_id *exid)
2175 {
2176 struct nfs4_client *conf, *new;
2177 struct nfs4_client *unconf = NULL;
2178 __be32 status;
2179 char addr_str[INET6_ADDRSTRLEN];
2180 nfs4_verifier verf = exid->verifier;
2181 struct sockaddr *sa = svc_addr(rqstp);
2182 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2183 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2184
2185 rpc_ntop(sa, addr_str, sizeof(addr_str));
2186 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2187 "ip_addr=%s flags %x, spa_how %d\n",
2188 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
2189 addr_str, exid->flags, exid->spa_how);
2190
2191 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2192 return nfserr_inval;
2193
2194 switch (exid->spa_how) {
2195 case SP4_MACH_CRED:
2196 if (!svc_rqst_integrity_protected(rqstp))
2197 return nfserr_inval;
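/* fall through */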
2198 case SP4_NONE:
2199 break;
2200 default: /* checked by xdr code */
2201 WARN_ON_ONCE(1);
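/* fall through */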
2202 case SP4_SSV:
2203 return nfserr_encr_alg_unsupp;
2204 }
2205
2206 new = create_client(exid->clname, rqstp, &verf);
2207 if (new == NULL)
2208 return nfserr_jukebox;
2209
2210 /* Cases below refer to rfc 5661 section 18.35.4: */
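/*
 * Rough map of those cases as handled here: with a confirmed record
 * and the update flag set, cases 9 (cred mismatch), 8 (verifier
 * mismatch) and 6 (update succeeds); without the update flag, cases 3
 * (cred mismatch), 2 (retry) and 5 (client reboot). With no confirmed
 * record: case 7 (update with nothing to update), case 4 (unconfirmed
 * retry or restart) and case 1 (the normal case).
 */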
2211 spin_lock(&nn->client_lock);
2212 conf = find_confirmed_client_by_name(&exid->clname, nn);
2213 if (conf) {
2214 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2215 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2216
2217 if (update) {
2218 if (!clp_used_exchangeid(conf)) { /* buggy client */
2219 status = nfserr_inval;
2220 goto out;
2221 }
2222 if (!mach_creds_match(conf, rqstp)) {
2223 status = nfserr_wrong_cred;
2224 goto out;
2225 }
2226 if (!creds_match) { /* case 9 */
2227 status = nfserr_perm;
2228 goto out;
2229 }
2230 if (!verfs_match) { /* case 8 */
2231 status = nfserr_not_same;
2232 goto out;
2233 }
2234 /* case 6 */
2235 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2236 goto out_copy;
2237 }
2238 if (!creds_match) { /* case 3 */
2239 if (client_has_state(conf)) {
2240 status = nfserr_clid_inuse;
2241 goto out;
2242 }
2243 goto out_new;
2244 }
2245 if (verfs_match) { /* case 2 */
2246 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2247 goto out_copy;
2248 }
2249 /* case 5, client reboot */
2250 conf = NULL;
2251 goto out_new;
2252 }
2253
2254 if (update) { /* case 7 */
2255 status = nfserr_noent;
2256 goto out;
2257 }
2258
2259 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
2260 if (unconf) /* case 4, possible retry or client restart */
2261 unhash_client_locked(unconf);
2262
2263 /* case 1 (normal case) */
2264 out_new:
2265 if (conf) {
2266 status = mark_client_expired_locked(conf);
2267 if (status)
2268 goto out;
2269 }
2270 new->cl_minorversion = cstate->minorversion;
2271 new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);
2272
2273 gen_clid(new, nn);
2274 add_to_unconfirmed(new);
2275 swap(new, conf);
2276 out_copy:
2277 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2278 exid->clientid.cl_id = conf->cl_clientid.cl_id;
2279
2280 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2281 nfsd4_set_ex_flags(conf, exid);
2282
2283 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2284 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2285 status = nfs_ok;
2286
2287 out:
2288 spin_unlock(&nn->client_lock);
2289 if (new)
2290 expire_client(new);
2291 if (unconf)
2292 expire_client(unconf);
2293 return status;
2294 }
2295
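/*
 * Slot seqid checking, by example: with slot_seqid == 5 and the slot
 * not in use, seqid 6 is the next new request (nfs_ok), seqid 5 is a
 * retransmission (nfserr_replay_cache), and anything else is
 * misordered. The unsigned 32-bit arithmetic means slot_seqid ==
 * 0xffffffff followed by seqid == 0 is also accepted as the next
 * request.
 */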
2296 static __be32
2297 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2298 {
2299 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2300 slot_seqid);
2301
2302 /* The slot is in use, and no response has been sent. */
2303 if (slot_inuse) {
2304 if (seqid == slot_seqid)
2305 return nfserr_jukebox;
2306 else
2307 return nfserr_seq_misordered;
2308 }
2309 /* Note unsigned 32-bit arithmetic handles wraparound: */
2310 if (likely(seqid == slot_seqid + 1))
2311 return nfs_ok;
2312 if (seqid == slot_seqid)
2313 return nfserr_replay_cache;
2314 return nfserr_seq_misordered;
2315 }
2316
2317 /*
2318 * Cache the create_session result in the single create_session DRC
2319 * slot by saving the decoded arguments and status. sl_seqid has been
2320 * set. Do this for solo or embedded create_session operations.
2321 */
2322 static void
2323 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2324 struct nfsd4_clid_slot *slot, __be32 nfserr)
2325 {
2326 slot->sl_status = nfserr;
2327 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2328 }
2329
2330 static __be32
2331 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2332 struct nfsd4_clid_slot *slot)
2333 {
2334 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2335 return slot->sl_status;
2336 }
2337
2338 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2339 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2340 1 + /* minimal tag: zero-length, so just the length word */ \
2341 3 + /* version, opcount, opcode */ \
2342 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2343 /* seqid, slotID, slotID, cache */ \
2344 4 ) * sizeof(__be32))
2345
2346 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2347 2 + /* verifier: AUTH_NULL, length 0 */\
2348 1 + /* status */ \
2349 1 + /* minimal tag: zero-length, so just the length word */ \
2350 3 + /* opcount, opcode, opstatus*/ \
2351 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2352 /* seqid, slotID, slotID, slotID, status */ \
2353 5 ) * sizeof(__be32))
2354
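/*
 * Worked example: assuming NFS4_MAX_SESSIONID_LEN is 16, so that
 * XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) == 4, the request minimum is
 * (4 + 1 + 3 + 4 + 4) * 4 == 64 bytes and the response minimum is
 * (2 + 1 + 1 + 3 + 4 + 5) * 4 == 64 bytes.
 */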
2355 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2356 {
2357 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2358
2359 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2360 return nfserr_toosmall;
2361 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2362 return nfserr_toosmall;
2363 ca->headerpadsz = 0;
2364 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2365 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2366 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2367 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2368 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2369 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2370 /*
2371 * Note that decreasing the slot size below the client's request may
2372 * make it difficult for the client to function correctly, whereas
2373 * decreasing the number of slots will (just?) affect
2374 * performance. When short on memory we therefore prefer to
2375 * decrease the number of slots instead of their size. Clients that
2376 * request larger slots than they need will get poor results:
2377 */
2378 ca->maxreqs = nfsd4_get_drc_mem(ca);
2379 if (!ca->maxreqs)
2380 return nfserr_jukebox;
2381
2382 return nfs_ok;
2383 }
2384
2385 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2386 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
2387 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2388 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2389
2390 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2391 {
2392 ca->headerpadsz = 0;
2393
2394 /*
2395 * These RPC_MAX_HEADER macros are overkill, especially since we
2396 * don't even do gss on the backchannel yet. But this is still
2397 * less than 1k. Tighten up this estimate in the unlikely event
2398 * it turns out to be a problem for some client:
2399 */
2400 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2401 return nfserr_toosmall;
2402 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2403 return nfserr_toosmall;
2404 ca->maxresp_cached = 0;
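/*
 * A backchannel compound needs room for CB_SEQUENCE plus at least
 * one real callback operation (e.g. CB_RECALL), hence the minimum
 * of 2 ops below.
 */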
2405 if (ca->maxops < 2)
2406 return nfserr_toosmall;
2407
2408 return nfs_ok;
2409 }
2410
2411 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2412 {
2413 switch (cbs->flavor) {
2414 case RPC_AUTH_NULL:
2415 case RPC_AUTH_UNIX:
2416 return nfs_ok;
2417 default:
2418 /*
2419 * GSS case: the spec doesn't allow us to return this
2420 * error. But it also doesn't allow us not to support
2421 * GSS.
2422 * I'd rather this fail hard than return some error the
2423 * client might think it can already handle:
2424 */
2425 return nfserr_encr_alg_unsupp;
2426 }
2427 }
2428
2429 __be32
2430 nfsd4_create_session(struct svc_rqst *rqstp,
2431 struct nfsd4_compound_state *cstate,
2432 struct nfsd4_create_session *cr_ses)
2433 {
2434 struct sockaddr *sa = svc_addr(rqstp);
2435 struct nfs4_client *conf, *unconf;
2436 struct nfs4_client *old = NULL;
2437 struct nfsd4_session *new;
2438 struct nfsd4_conn *conn;
2439 struct nfsd4_clid_slot *cs_slot = NULL;
2440 __be32 status = 0;
2441 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2442
2443 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2444 return nfserr_inval;
2445 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2446 if (status)
2447 return status;
2448 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2449 if (status)
2450 return status;
2451 status = check_backchannel_attrs(&cr_ses->back_channel);
2452 if (status)
2453 goto out_release_drc_mem;
2454 status = nfserr_jukebox;
2455 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2456 if (!new)
2457 goto out_release_drc_mem;
2458 conn = alloc_conn_from_crses(rqstp, cr_ses);
2459 if (!conn)
2460 goto out_free_session;
2461
2462 spin_lock(&nn->client_lock);
2463 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2464 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2465 WARN_ON_ONCE(conf && unconf);
2466
2467 if (conf) {
2468 status = nfserr_wrong_cred;
2469 if (!mach_creds_match(conf, rqstp))
2470 goto out_free_conn;
2471 cs_slot = &conf->cl_cs_slot;
2472 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2473 if (status == nfserr_replay_cache) {
2474 status = nfsd4_replay_create_session(cr_ses, cs_slot);
2475 goto out_free_conn;
2476 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
2477 status = nfserr_seq_misordered;
2478 goto out_free_conn;
2479 }
2480 } else if (unconf) {
2481 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2482 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2483 status = nfserr_clid_inuse;
2484 goto out_free_conn;
2485 }
2486 status = nfserr_wrong_cred;
2487 if (!mach_creds_match(unconf, rqstp))
2488 goto out_free_conn;
2489 cs_slot = &unconf->cl_cs_slot;
2490 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2491 if (status) {
2492 /* an unconfirmed replay returns misordered */
2493 status = nfserr_seq_misordered;
2494 goto out_free_conn;
2495 }
2496 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2497 if (old) {
2498 status = mark_client_expired_locked(old);
2499 if (status) {
2500 old = NULL;
2501 goto out_free_conn;
2502 }
2503 }
2504 move_to_confirmed(unconf);
2505 conf = unconf;
2506 } else {
2507 status = nfserr_stale_clientid;
2508 goto out_free_conn;
2509 }
2510 status = nfs_ok;
2511 /*
2512 * We do not support RDMA or persistent sessions
2513 */
2514 cr_ses->flags &= ~SESSION4_PERSIST;
2515 cr_ses->flags &= ~SESSION4_RDMA;
2516
2517 init_session(rqstp, new, conf, cr_ses);
2518 nfsd4_get_session_locked(new);
2519
2520 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2521 NFS4_MAX_SESSIONID_LEN);
2522 cs_slot->sl_seqid++;
2523 cr_ses->seqid = cs_slot->sl_seqid;
2524
2525 /* cache solo and embedded create sessions under the client_lock */
2526 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2527 spin_unlock(&nn->client_lock);
2528 /* init connection and backchannel */
2529 nfsd4_init_conn(rqstp, conn, new);
2530 nfsd4_put_session(new);
2531 if (old)
2532 expire_client(old);
2533 return status;
2534 out_free_conn:
2535 spin_unlock(&nn->client_lock);
2536 free_conn(conn);
2537 if (old)
2538 expire_client(old);
2539 out_free_session:
2540 __free_session(new);
2541 out_release_drc_mem:
2542 nfsd4_put_drc_mem(&cr_ses->fore_channel);
2543 return status;
2544 }
2545
2546 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2547 {
2548 switch (*dir) {
2549 case NFS4_CDFC4_FORE:
2550 case NFS4_CDFC4_BACK:
2551 return nfs_ok;
2552 case NFS4_CDFC4_FORE_OR_BOTH:
2553 case NFS4_CDFC4_BACK_OR_BOTH:
2554 *dir = NFS4_CDFC4_BOTH;
2555 return nfs_ok;
2556 }
2557 return nfserr_inval;
2558 }
2559
2560 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2561 {
2562 struct nfsd4_session *session = cstate->session;
2563 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2564 __be32 status;
2565
2566 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2567 if (status)
2568 return status;
2569 spin_lock(&nn->client_lock);
2570 session->se_cb_prog = bc->bc_cb_program;
2571 session->se_cb_sec = bc->bc_cb_sec;
2572 spin_unlock(&nn->client_lock);
2573
2574 nfsd4_probe_callback(session->se_client);
2575
2576 return nfs_ok;
2577 }
2578
2579 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2580 struct nfsd4_compound_state *cstate,
2581 struct nfsd4_bind_conn_to_session *bcts)
2582 {
2583 __be32 status;
2584 struct nfsd4_conn *conn;
2585 struct nfsd4_session *session;
2586 struct net *net = SVC_NET(rqstp);
2587 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2588
2589 if (!nfsd4_last_compound_op(rqstp))
2590 return nfserr_not_only_op;
2591 spin_lock(&nn->client_lock);
2592 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2593 spin_unlock(&nn->client_lock);
2594 if (!session)
2595 goto out_no_session;
2596 status = nfserr_wrong_cred;
2597 if (!mach_creds_match(session->se_client, rqstp))
2598 goto out;
2599 status = nfsd4_map_bcts_dir(&bcts->dir);
2600 if (status)
2601 goto out;
2602 conn = alloc_conn(rqstp, bcts->dir);
2603 status = nfserr_jukebox;
2604 if (!conn)
2605 goto out;
2606 nfsd4_init_conn(rqstp, conn, session);
2607 status = nfs_ok;
2608 out:
2609 nfsd4_put_session(session);
2610 out_no_session:
2611 return status;
2612 }
2613
2614 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2615 {
2616 if (!session)
2617 return false;
2618 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2619 }
2620
2621 __be32
2622 nfsd4_destroy_session(struct svc_rqst *r,
2623 struct nfsd4_compound_state *cstate,
2624 struct nfsd4_destroy_session *sessionid)
2625 {
2626 struct nfsd4_session *ses;
2627 __be32 status;
2628 int ref_held_by_me = 0;
2629 struct net *net = SVC_NET(r);
2630 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2631
2632 status = nfserr_not_only_op;
2633 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2634 if (!nfsd4_last_compound_op(r))
2635 goto out;
2636 ref_held_by_me++;
2637 }
2638 dump_sessionid(__func__, &sessionid->sessionid);
2639 spin_lock(&nn->client_lock);
2640 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2641 if (!ses)
2642 goto out_client_lock;
2643 status = nfserr_wrong_cred;
2644 if (!mach_creds_match(ses->se_client, r))
2645 goto out_put_session;
2646 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2647 if (status)
2648 goto out_put_session;
2649 unhash_session(ses);
2650 spin_unlock(&nn->client_lock);
2651
2652 nfsd4_probe_callback_sync(ses->se_client);
2653
2654 spin_lock(&nn->client_lock);
2655 status = nfs_ok;
2656 out_put_session:
2657 nfsd4_put_session_locked(ses);
2658 out_client_lock:
2659 spin_unlock(&nn->client_lock);
2660 out:
2661 return status;
2662 }
2663
2664 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2665 {
2666 struct nfsd4_conn *c;
2667
2668 list_for_each_entry(c, &s->se_conns, cn_persession) {
2669 if (c->cn_xprt == xpt) {
2670 return c;
2671 }
2672 }
2673 return NULL;
2674 }
2675
2676 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2677 {
2678 struct nfs4_client *clp = ses->se_client;
2679 struct nfsd4_conn *c;
2680 __be32 status = nfs_ok;
2681 int ret;
2682
2683 spin_lock(&clp->cl_lock);
2684 c = __nfsd4_find_conn(new->cn_xprt, ses);
2685 if (c)
2686 goto out_free;
2687 status = nfserr_conn_not_bound_to_session;
2688 if (clp->cl_mach_cred)
2689 goto out_free;
2690 __nfsd4_hash_conn(new, ses);
2691 spin_unlock(&clp->cl_lock);
2692 ret = nfsd4_register_conn(new);
2693 if (ret)
2694 /* oops; xprt is already down: */
2695 nfsd4_conn_lost(&new->cn_xpt_user);
2696 return nfs_ok;
2697 out_free:
2698 spin_unlock(&clp->cl_lock);
2699 free_conn(new);
2700 return status;
2701 }
2702
2703 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2704 {
2705 struct nfsd4_compoundargs *args = rqstp->rq_argp;
2706
2707 return args->opcnt > session->se_fchannel.maxops;
2708 }
2709
2710 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2711 struct nfsd4_session *session)
2712 {
2713 struct xdr_buf *xb = &rqstp->rq_arg;
2714
2715 return xb->len > session->se_fchannel.maxreq_sz;
2716 }
2717
2718 __be32
2719 nfsd4_sequence(struct svc_rqst *rqstp,
2720 struct nfsd4_compound_state *cstate,
2721 struct nfsd4_sequence *seq)
2722 {
2723 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2724 struct xdr_stream *xdr = &resp->xdr;
2725 struct nfsd4_session *session;
2726 struct nfs4_client *clp;
2727 struct nfsd4_slot *slot;
2728 struct nfsd4_conn *conn;
2729 __be32 status;
2730 int buflen;
2731 struct net *net = SVC_NET(rqstp);
2732 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2733
2734 if (resp->opcnt != 1)
2735 return nfserr_sequence_pos;
2736
2737 /*
2738 * Will be either used or freed by nfsd4_sequence_check_conn
2739 * below.
2740 */
2741 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2742 if (!conn)
2743 return nfserr_jukebox;
2744
2745 spin_lock(&nn->client_lock);
2746 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
2747 if (!session)
2748 goto out_no_session;
2749 clp = session->se_client;
2750
2751 status = nfserr_too_many_ops;
2752 if (nfsd4_session_too_many_ops(rqstp, session))
2753 goto out_put_session;
2754
2755 status = nfserr_req_too_big;
2756 if (nfsd4_request_too_big(rqstp, session))
2757 goto out_put_session;
2758
2759 status = nfserr_badslot;
2760 if (seq->slotid >= session->se_fchannel.maxreqs)
2761 goto out_put_session;
2762
2763 slot = session->se_slots[seq->slotid];
2764 dprintk("%s: slotid %d\n", __func__, seq->slotid);
2765
2766 /* We do not negotiate the number of slots yet, so set the
2767 * maxslots to the session maxreqs, which is used to encode
2768 * sr_highest_slotid and sr_target_slotid to maxslots */
2769 seq->maxslots = session->se_fchannel.maxreqs;
2770
2771 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2772 slot->sl_flags & NFSD4_SLOT_INUSE);
2773 if (status == nfserr_replay_cache) {
2774 status = nfserr_seq_misordered;
2775 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2776 goto out_put_session;
2777 cstate->slot = slot;
2778 cstate->session = session;
2779 cstate->clp = clp;
2780 /* Return the cached reply status and set cstate->status
2781 * for nfsd4_proc_compound processing */
2782 status = nfsd4_replay_cache_entry(resp, seq);
2783 cstate->status = nfserr_replay_cache;
2784 goto out;
2785 }
2786 if (status)
2787 goto out_put_session;
2788
2789 status = nfsd4_sequence_check_conn(conn, session);
2790 conn = NULL;
2791 if (status)
2792 goto out_put_session;
2793
2794 buflen = (seq->cachethis) ?
2795 session->se_fchannel.maxresp_cached :
2796 session->se_fchannel.maxresp_sz;
2797 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
2798 nfserr_rep_too_big;
2799 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
2800 goto out_put_session;
2801 svc_reserve(rqstp, buflen);
2802
2803 status = nfs_ok;
2804 /* Success! bump slot seqid */
2805 slot->sl_seqid = seq->seqid;
2806 slot->sl_flags |= NFSD4_SLOT_INUSE;
2807 if (seq->cachethis)
2808 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2809 else
2810 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2811
2812 cstate->slot = slot;
2813 cstate->session = session;
2814 cstate->clp = clp;
2815
2816 out:
2817 switch (clp->cl_cb_state) {
2818 case NFSD4_CB_DOWN:
2819 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2820 break;
2821 case NFSD4_CB_FAULT:
2822 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2823 break;
2824 default:
2825 seq->status_flags = 0;
2826 }
2827 if (!list_empty(&clp->cl_revoked))
2828 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2829 out_no_session:
2830 if (conn)
2831 free_conn(conn);
2832 spin_unlock(&nn->client_lock);
2833 return status;
2834 out_put_session:
2835 nfsd4_put_session_locked(session);
2836 goto out_no_session;
2837 }
2838
2839 void
2840 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2841 {
2842 struct nfsd4_compound_state *cs = &resp->cstate;
2843
2844 if (nfsd4_has_session(cs)) {
2845 if (cs->status != nfserr_replay_cache) {
2846 nfsd4_store_cache_entry(resp);
2847 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2848 }
2849 /* Drop session reference that was taken in nfsd4_sequence() */
2850 nfsd4_put_session(cs->session);
2851 } else if (cs->clp)
2852 put_client_renew(cs->clp);
2853 }
2854
2855 __be32
2856 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2857 {
2858 struct nfs4_client *conf, *unconf;
2859 struct nfs4_client *clp = NULL;
2860 __be32 status = 0;
2861 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2862
2863 spin_lock(&nn->client_lock);
2864 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2865 conf = find_confirmed_client(&dc->clientid, true, nn);
2866 WARN_ON_ONCE(conf && unconf);
2867
2868 if (conf) {
2869 if (client_has_state(conf)) {
2870 status = nfserr_clientid_busy;
2871 goto out;
2872 }
2873 status = mark_client_expired_locked(conf);
2874 if (status)
2875 goto out;
2876 clp = conf;
2877 } else if (unconf)
2878 clp = unconf;
2879 else {
2880 status = nfserr_stale_clientid;
2881 goto out;
2882 }
2883 if (!mach_creds_match(clp, rqstp)) {
2884 clp = NULL;
2885 status = nfserr_wrong_cred;
2886 goto out;
2887 }
2888 unhash_client_locked(clp);
2889 out:
2890 spin_unlock(&nn->client_lock);
2891 if (clp)
2892 expire_client(clp);
2893 return status;
2894 }
2895
2896 __be32
2897 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2898 {
2899 __be32 status = 0;
2900
2901 if (rc->rca_one_fs) {
2902 if (!cstate->current_fh.fh_dentry)
2903 return nfserr_nofilehandle;
2904 /*
2905 * We don't take advantage of the rca_one_fs case.
2906 * That's OK, it's optional, we can safely ignore it.
2907 */
2908 return nfs_ok;
2909 }
2910
2911 status = nfserr_complete_already;
2912 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2913 &cstate->session->se_client->cl_flags))
2914 goto out;
2915
2916 status = nfserr_stale_clientid;
2917 if (is_client_expired(cstate->session->se_client))
2918 /*
2919 * The following error isn't really legal.
2920 * But we only get here if the client just explicitly
2921 * destroyed the clientid. Surely it no longer cares what
2922 * error it gets back on an operation for the dead
2923 * client.
2924 */
2925 goto out;
2926
2927 status = nfs_ok;
2928 nfsd4_client_record_create(cstate->session->se_client);
2929 out:
2930 return status;
2931 }
2932
2933 __be32
2934 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2935 struct nfsd4_setclientid *setclid)
2936 {
2937 struct xdr_netobj clname = setclid->se_name;
2938 nfs4_verifier clverifier = setclid->se_verf;
2939 struct nfs4_client *conf, *new;
2940 struct nfs4_client *unconf = NULL;
2941 __be32 status;
2942 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2943
2944 new = create_client(clname, rqstp, &clverifier);
2945 if (new == NULL)
2946 return nfserr_jukebox;
2947 /* Cases below refer to rfc 3530 section 14.2.33: */
2948 spin_lock(&nn->client_lock);
2949 conf = find_confirmed_client_by_name(&clname, nn);
2950 if (conf) {
2951 /* case 0: */
2952 status = nfserr_clid_inuse;
2953 if (clp_used_exchangeid(conf))
2954 goto out;
2955 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2956 char addr_str[INET6_ADDRSTRLEN];
2957 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2958 sizeof(addr_str));
2959 dprintk("NFSD: setclientid: string in use by client "
2960 "at %s\n", addr_str);
2961 goto out;
2962 }
2963 }
2964 unconf = find_unconfirmed_client_by_name(&clname, nn);
2965 if (unconf)
2966 unhash_client_locked(unconf);
2967 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2968 /* case 1: probable callback update */
2969 copy_clid(new, conf);
2970 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2971 gen_clid(new, nn);
2972 new->cl_minorversion = 0;
2973 gen_callback(new, setclid, rqstp);
2974 add_to_unconfirmed(new);
2975 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2976 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2977 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2978 new = NULL;
2979 status = nfs_ok;
2980 out:
2981 spin_unlock(&nn->client_lock);
2982 if (new)
2983 free_client(new);
2984 if (unconf)
2985 expire_client(unconf);
2986 return status;
2987 }
2988
2989
2990 __be32
2991 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2992 struct nfsd4_compound_state *cstate,
2993 struct nfsd4_setclientid_confirm *setclientid_confirm)
2994 {
2995 struct nfs4_client *conf, *unconf;
2996 struct nfs4_client *old = NULL;
2997 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2998 clientid_t * clid = &setclientid_confirm->sc_clientid;
2999 __be32 status;
3000 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3001
3002 if (STALE_CLIENTID(clid, nn))
3003 return nfserr_stale_clientid;
3004
3005 spin_lock(&nn->client_lock);
3006 conf = find_confirmed_client(clid, false, nn);
3007 unconf = find_unconfirmed_client(clid, false, nn);
3008 /*
3009 * We try hard to give out unique clientid's, so if we get an
3010 * attempt to confirm the same clientid with a different cred,
3011 * there's a bug somewhere. Let's charitably assume it's our
3012 * bug.
3013 */
3014 status = nfserr_serverfault;
3015 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3016 goto out;
3017 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3018 goto out;
3019 /* cases below refer to rfc 3530 section 14.2.34: */
3020 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3021 if (conf && !unconf) /* case 2: probable retransmit */
3022 status = nfs_ok;
3023 else /* case 4: client hasn't noticed we rebooted yet? */
3024 status = nfserr_stale_clientid;
3025 goto out;
3026 }
3027 status = nfs_ok;
3028 if (conf) { /* case 1: callback update */
3029 old = unconf;
3030 unhash_client_locked(old);
3031 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3032 } else { /* case 3: normal case; new or rebooted client */
3033 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3034 if (old) {
3035 status = mark_client_expired_locked(old);
3036 if (status) {
3037 old = NULL;
3038 goto out;
3039 }
3040 }
3041 move_to_confirmed(unconf);
3042 conf = unconf;
3043 }
3044 get_client_locked(conf);
3045 spin_unlock(&nn->client_lock);
3046 nfsd4_probe_callback(conf);
3047 spin_lock(&nn->client_lock);
3048 put_client_renew_locked(conf);
3049 out:
3050 spin_unlock(&nn->client_lock);
3051 if (old)
3052 expire_client(old);
3053 return status;
3054 }
3055
3056 static struct nfs4_file *nfsd4_alloc_file(void)
3057 {
3058 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3059 }
3060
3061 /* OPEN Share state helper functions */
3062 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3063 struct nfs4_file *fp)
3064 {
3065 lockdep_assert_held(&state_lock);
3066
3067 atomic_set(&fp->fi_ref, 1);
3068 spin_lock_init(&fp->fi_lock);
3069 INIT_LIST_HEAD(&fp->fi_stateids);
3070 INIT_LIST_HEAD(&fp->fi_delegations);
3071 fh_copy_shallow(&fp->fi_fhandle, fh);
3072 fp->fi_deleg_file = NULL;
3073 fp->fi_had_conflict = false;
3074 fp->fi_share_deny = 0;
3075 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3076 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3077 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3078 }
3079
3080 void
3081 nfsd4_free_slabs(void)
3082 {
3083 kmem_cache_destroy(openowner_slab);
3084 kmem_cache_destroy(lockowner_slab);
3085 kmem_cache_destroy(file_slab);
3086 kmem_cache_destroy(stateid_slab);
3087 kmem_cache_destroy(deleg_slab);
3088 }
3089
3090 int
3091 nfsd4_init_slabs(void)
3092 {
3093 openowner_slab = kmem_cache_create("nfsd4_openowners",
3094 sizeof(struct nfs4_openowner), 0, 0, NULL);
3095 if (openowner_slab == NULL)
3096 goto out;
3097 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3098 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3099 if (lockowner_slab == NULL)
3100 goto out_free_openowner_slab;
3101 file_slab = kmem_cache_create("nfsd4_files",
3102 sizeof(struct nfs4_file), 0, 0, NULL);
3103 if (file_slab == NULL)
3104 goto out_free_lockowner_slab;
3105 stateid_slab = kmem_cache_create("nfsd4_stateids",
3106 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3107 if (stateid_slab == NULL)
3108 goto out_free_file_slab;
3109 deleg_slab = kmem_cache_create("nfsd4_delegations",
3110 sizeof(struct nfs4_delegation), 0, 0, NULL);
3111 if (deleg_slab == NULL)
3112 goto out_free_stateid_slab;
3113 return 0;
3114
3115 out_free_stateid_slab:
3116 kmem_cache_destroy(stateid_slab);
3117 out_free_file_slab:
3118 kmem_cache_destroy(file_slab);
3119 out_free_lockowner_slab:
3120 kmem_cache_destroy(lockowner_slab);
3121 out_free_openowner_slab:
3122 kmem_cache_destroy(openowner_slab);
3123 out:
3124 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3125 return -ENOMEM;
3126 }
3127
3128 static void init_nfs4_replay(struct nfs4_replay *rp)
3129 {
3130 rp->rp_status = nfserr_serverfault;
3131 rp->rp_buflen = 0;
3132 rp->rp_buf = rp->rp_ibuf;
3133 mutex_init(&rp->rp_mutex);
3134 }
3135
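/*
 * For v4.0 only: rp_mutex, taken here, is held for the rest of the
 * compound and dropped in nfsd4_cstate_clear_replay(), serializing
 * seqid-bearing operations from the same stateowner so that replay
 * detection sees a consistent so_seqid and replay buffer.
 */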
3136 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3137 struct nfs4_stateowner *so)
3138 {
3139 if (!nfsd4_has_session(cstate)) {
3140 mutex_lock(&so->so_replay.rp_mutex);
3141 cstate->replay_owner = nfs4_get_stateowner(so);
3142 }
3143 }
3144
3145 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3146 {
3147 struct nfs4_stateowner *so = cstate->replay_owner;
3148
3149 if (so != NULL) {
3150 cstate->replay_owner = NULL;
3151 mutex_unlock(&so->so_replay.rp_mutex);
3152 nfs4_put_stateowner(so);
3153 }
3154 }
3155
3156 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3157 {
3158 struct nfs4_stateowner *sop;
3159
3160 sop = kmem_cache_alloc(slab, GFP_KERNEL);
3161 if (!sop)
3162 return NULL;
3163
3164 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3165 if (!sop->so_owner.data) {
3166 kmem_cache_free(slab, sop);
3167 return NULL;
3168 }
3169 sop->so_owner.len = owner->len;
3170
3171 INIT_LIST_HEAD(&sop->so_stateids);
3172 sop->so_client = clp;
3173 init_nfs4_replay(&sop->so_replay);
3174 atomic_set(&sop->so_count, 1);
3175 return sop;
3176 }
3177
3178 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3179 {
3180 lockdep_assert_held(&clp->cl_lock);
3181
3182 list_add(&oo->oo_owner.so_strhash,
3183 &clp->cl_ownerstr_hashtbl[strhashval]);
3184 list_add(&oo->oo_perclient, &clp->cl_openowners);
3185 }
3186
3187 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3188 {
3189 unhash_openowner_locked(openowner(so));
3190 }
3191
3192 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3193 {
3194 struct nfs4_openowner *oo = openowner(so);
3195
3196 kmem_cache_free(openowner_slab, oo);
3197 }
3198
3199 static const struct nfs4_stateowner_operations openowner_ops = {
3200 .so_unhash = nfs4_unhash_openowner,
3201 .so_free = nfs4_free_openowner,
3202 };
3203
3204 static struct nfs4_openowner *
3205 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3206 struct nfsd4_compound_state *cstate)
3207 {
3208 struct nfs4_client *clp = cstate->clp;
3209 struct nfs4_openowner *oo, *ret;
3210
3211 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3212 if (!oo)
3213 return NULL;
3214 oo->oo_owner.so_ops = &openowner_ops;
3215 oo->oo_owner.so_is_open_owner = 1;
3216 oo->oo_owner.so_seqid = open->op_seqid;
3217 oo->oo_flags = 0;
3218 if (nfsd4_has_session(cstate))
3219 oo->oo_flags |= NFS4_OO_CONFIRMED;
3220 oo->oo_time = 0;
3221 oo->oo_last_closed_stid = NULL;
3222 INIT_LIST_HEAD(&oo->oo_close_lru);
3223 spin_lock(&clp->cl_lock);
3224 ret = find_openstateowner_str_locked(strhashval, open, clp);
3225 if (ret == NULL) {
3226 hash_openowner(oo, clp, strhashval);
3227 ret = oo;
3228 } else
3229 nfs4_free_openowner(&oo->oo_owner);
3230 spin_unlock(&clp->cl_lock);
3231 return oo;
3232 }
3233
3234 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
		struct nfsd4_open *open)
{
3235 struct nfs4_openowner *oo = open->op_openowner;
3236
3237 atomic_inc(&stp->st_stid.sc_count);
3238 stp->st_stid.sc_type = NFS4_OPEN_STID;
3239 INIT_LIST_HEAD(&stp->st_locks);
3240 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3241 get_nfs4_file(fp);
3242 stp->st_stid.sc_file = fp;
3243 stp->st_access_bmap = 0;
3244 stp->st_deny_bmap = 0;
3245 stp->st_openstp = NULL;
3246 spin_lock(&oo->oo_owner.so_client->cl_lock);
3247 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3248 spin_lock(&fp->fi_lock);
3249 list_add(&stp->st_perfile, &fp->fi_stateids);
3250 spin_unlock(&fp->fi_lock);
3251 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3252 }
3253
3254 /*
3255 * In the 4.0 case we need to keep the owners around a little while to handle
3256 * CLOSE replay. We still do need to release any file access that is held by
3257 * them before returning however.
3258 */
3259 static void
3260 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3261 {
3262 struct nfs4_ol_stateid *last;
3263 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3264 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3265 nfsd_net_id);
3266
3267 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3268
3269 /*
3270 * We know that we hold one reference via nfsd4_close, and another
3271 * "persistent" reference for the client. If the refcount is higher
3272 * than 2, then there are still calls in progress that are using this
3273 * stateid. We can't put the sc_file reference until they are finished.
3274 * Wait for the refcount to drop to 2. Since it has been unhashed,
3275 * there should be no danger of the refcount going back up again at
3276 * this point.
3277 */
3278 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3279
3280 release_all_access(s);
3281 if (s->st_stid.sc_file) {
3282 put_nfs4_file(s->st_stid.sc_file);
3283 s->st_stid.sc_file = NULL;
3284 }
3285
3286 spin_lock(&nn->client_lock);
3287 last = oo->oo_last_closed_stid;
3288 oo->oo_last_closed_stid = s;
3289 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3290 oo->oo_time = get_seconds();
3291 spin_unlock(&nn->client_lock);
3292 if (last)
3293 nfs4_put_stid(&last->st_stid);
3294 }
3295
3296 /* search file_hashtbl[] for file */
3297 static struct nfs4_file *
3298 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3299 {
3300 struct nfs4_file *fp;
3301
3302 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3303 if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
3304 if (atomic_inc_not_zero(&fp->fi_ref))
3305 return fp;
3306 }
3307 }
3308 return NULL;
3309 }
3310
3311 static struct nfs4_file *
3312 find_file(struct knfsd_fh *fh)
3313 {
3314 struct nfs4_file *fp;
3315 unsigned int hashval = file_hashval(fh);
3316
3317 rcu_read_lock();
3318 fp = find_file_locked(fh, hashval);
3319 rcu_read_unlock();
3320 return fp;
3321 }
3322
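/*
 * Lookup/insert below is double-checked: a lockless RCU lookup first,
 * and only on a miss is state_lock taken and the lookup repeated
 * before inserting, so a racing inserter is found rather than
 * duplicated. find_file_locked() uses atomic_inc_not_zero() so that a
 * file whose last reference is being dropped is never resurrected.
 */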
3323 static struct nfs4_file *
3324 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3325 {
3326 struct nfs4_file *fp;
3327 unsigned int hashval = file_hashval(fh);
3328
3329 rcu_read_lock();
3330 fp = find_file_locked(fh, hashval);
3331 rcu_read_unlock();
3332 if (fp)
3333 return fp;
3334
3335 spin_lock(&state_lock);
3336 fp = find_file_locked(fh, hashval);
3337 if (likely(fp == NULL)) {
3338 nfsd4_init_file(fh, hashval, new);
3339 fp = new;
3340 }
3341 spin_unlock(&state_lock);
3342
3343 return fp;
3344 }
3345
3346 /*
3347 * Called to check the deny mode when a READ is done with an all-zero
3348 * stateid, or a WRITE with an all-zero or all-ones stateid.
3349 */
3350 static __be32
3351 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3352 {
3353 struct nfs4_file *fp;
3354 __be32 ret = nfs_ok;
3355
3356 fp = find_file(&current_fh->fh_handle);
3357 if (!fp)
3358 return ret;
3359 /* Check for conflicting share reservations */
3360 spin_lock(&fp->fi_lock);
3361 if (fp->fi_share_deny & deny_type)
3362 ret = nfserr_locked;
3363 spin_unlock(&fp->fi_lock);
3364 put_nfs4_file(fp);
3365 return ret;
3366 }
3367
3368 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3369 {
3370 struct nfs4_delegation *dp = cb_to_delegation(cb);
3371 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3372 nfsd_net_id);
3373
3374 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3375
3376 /*
3377 * We can't do this in nfsd_break_deleg_cb because it is
3378 * already holding inode->i_lock.
3379 *
3380 * If the dl_time != 0, then we know that it has already been
3381 * queued for a lease break. Don't queue it again.
3382 */
3383 spin_lock(&state_lock);
3384 if (dp->dl_time == 0) {
3385 dp->dl_time = get_seconds();
3386 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3387 }
3388 spin_unlock(&state_lock);
3389 }
3390
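/*
 * The ->done return value appears to be interpreted by the callback
 * dispatcher as: 1 - the callback is finished, 0 - restart the RPC
 * call (here after an rpc_delay()), -1 - give up and mark the
 * callback path down.
 */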
3391 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3392 struct rpc_task *task)
3393 {
3394 struct nfs4_delegation *dp = cb_to_delegation(cb);
3395
3396 switch (task->tk_status) {
3397 case 0:
3398 return 1;
3399 case -EBADHANDLE:
3400 case -NFS4ERR_BAD_STATEID:
3401 /*
3402 * Race: client probably got cb_recall before open reply
3403 * granting delegation.
3404 */
3405 if (dp->dl_retries--) {
3406 rpc_delay(task, 2 * HZ);
3407 return 0;
3408 }
3409 /*FALLTHRU*/
3410 default:
3411 return -1;
3412 }
3413 }
3414
3415 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3416 {
3417 struct nfs4_delegation *dp = cb_to_delegation(cb);
3418
3419 nfs4_put_stid(&dp->dl_stid);
3420 }
3421
3422 static struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3423 .prepare = nfsd4_cb_recall_prepare,
3424 .done = nfsd4_cb_recall_done,
3425 .release = nfsd4_cb_recall_release,
3426 };
3427
3428 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3429 {
3430 /*
3431 * We're assuming the state code never drops its reference
3432 * without first removing the lease. Since we're in this lease
3433 * callback (and since the lease code is serialized by the i_lock)
3434 * we know the server hasn't removed the lease yet, and so it's
3435 * safe to take a reference.
3436 */
3437 atomic_inc(&dp->dl_stid.sc_count);
3438 nfsd4_run_cb(&dp->dl_recall);
3439 }
3440
3441 /* Called from break_lease() with i_lock held. */
3442 static bool
3443 nfsd_break_deleg_cb(struct file_lock *fl)
3444 {
3445 bool ret = false;
3446 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3447 struct nfs4_delegation *dp;
3448
3449 if (!fp) {
3450 WARN(1, "(%p)->fl_owner NULL\n", fl);
3451 return ret;
3452 }
3453 if (fp->fi_had_conflict) {
3454 WARN(1, "duplicate break on %p\n", fp);
3455 return ret;
3456 }
3457 /*
3458 * We don't want the locks code to timeout the lease for us;
3459 * we'll remove it ourselves if a delegation isn't returned
3460 * in time:
3461 */
3462 fl->fl_break_time = 0;
3463
3464 spin_lock(&fp->fi_lock);
3465 fp->fi_had_conflict = true;
3466 /*
3467 * If there are no delegations on the list, then return true
3468 * so that the lease code will go ahead and delete it.
3469 */
3470 if (list_empty(&fp->fi_delegations))
3471 ret = true;
3472 else
3473 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3474 nfsd_break_one_deleg(dp);
3475 spin_unlock(&fp->fi_lock);
3476 return ret;
3477 }
3478
3479 static int
3480 nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
3481 {
3482 if (arg & F_UNLCK)
3483 return lease_modify(onlist, arg, dispose);
3484 else
3485 return -EAGAIN;
3486 }
3487
3488 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3489 .lm_break = nfsd_break_deleg_cb,
3490 .lm_change = nfsd_change_deleg_cb,
3491 };
3492
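/*
 * v4.0 seqid discipline: seqid == so_seqid is the expected next
 * request; seqid == so_seqid - 1 is a retransmission of the previous
 * request, answered from the replay cache; anything else is bad.
 * Sessions (v4.1+) detect replays via the slot table instead, so the
 * check is skipped there.
 */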
3493 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3494 {
3495 if (nfsd4_has_session(cstate))
3496 return nfs_ok;
3497 if (seqid == so->so_seqid - 1)
3498 return nfserr_replay_me;
3499 if (seqid == so->so_seqid)
3500 return nfs_ok;
3501 return nfserr_bad_seqid;
3502 }
3503
3504 static __be32 lookup_clientid(clientid_t *clid,
3505 struct nfsd4_compound_state *cstate,
3506 struct nfsd_net *nn)
3507 {
3508 struct nfs4_client *found;
3509
3510 if (cstate->clp) {
3511 found = cstate->clp;
3512 if (!same_clid(&found->cl_clientid, clid))
3513 return nfserr_stale_clientid;
3514 return nfs_ok;
3515 }
3516
3517 if (STALE_CLIENTID(clid, nn))
3518 return nfserr_stale_clientid;
3519
3520 /*
3521 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3522 * cached already then we know this is for v4.0 and "sessions"
3523 * will be false.
3524 */
3525 WARN_ON_ONCE(cstate->session);
3526 spin_lock(&nn->client_lock);
3527 found = find_confirmed_client(clid, false, nn);
3528 if (!found) {
3529 spin_unlock(&nn->client_lock);
3530 return nfserr_expired;
3531 }
3532 atomic_inc(&found->cl_refcount);
3533 spin_unlock(&nn->client_lock);
3534
3535 /* Cache the nfs4_client in cstate! */
3536 cstate->clp = found;
3537 return nfs_ok;
3538 }
3539
3540 __be32
3541 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3542 struct nfsd4_open *open, struct nfsd_net *nn)
3543 {
3544 clientid_t *clientid = &open->op_clientid;
3545 struct nfs4_client *clp = NULL;
3546 unsigned int strhashval;
3547 struct nfs4_openowner *oo = NULL;
3548 __be32 status;
3549
3550 if (STALE_CLIENTID(&open->op_clientid, nn))
3551 return nfserr_stale_clientid;
3552 /*
3553 * In case we need it later, after we've already created the
3554 * file and don't want to risk a further failure:
3555 */
3556 open->op_file = nfsd4_alloc_file();
3557 if (open->op_file == NULL)
3558 return nfserr_jukebox;
3559
3560 status = lookup_clientid(clientid, cstate, nn);
3561 if (status)
3562 return status;
3563 clp = cstate->clp;
3564
3565 strhashval = ownerstr_hashval(&open->op_owner);
3566 oo = find_openstateowner_str(strhashval, open, clp);
3567 open->op_openowner = oo;
3568 if (!oo)
3569 goto new_owner;
3571 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
3572 /* Replace unconfirmed owners without checking for replay. */
3573 release_openowner(oo);
3574 open->op_openowner = NULL;
3575 goto new_owner;
3576 }
3577 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
3578 if (status)
3579 return status;
3580 goto alloc_stateid;
3581 new_owner:
3582 oo = alloc_init_open_stateowner(strhashval, open, cstate);
3583 if (oo == NULL)
3584 return nfserr_jukebox;
3585 open->op_openowner = oo;
3586 alloc_stateid:
3587 open->op_stp = nfs4_alloc_open_stateid(clp);
3588 if (!open->op_stp)
3589 return nfserr_jukebox;
3590 return nfs_ok;
3591 }
3592
3593 static inline __be32
3594 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3595 {
3596 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3597 return nfserr_openmode;
3598 else
3599 return nfs_ok;
3600 }
3601
3602 static int share_access_to_flags(u32 share_access)
3603 {
3604 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3605 }
3606
3607 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3608 {
3609 struct nfs4_stid *ret;
3610
3611 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3612 if (!ret)
3613 return NULL;
3614 return delegstateid(ret);
3615 }
3616
3617 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3618 {
3619 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3620 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3621 }
3622
3623 static __be32
3624 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3625 struct nfs4_delegation **dp)
3626 {
3627 int flags;
3628 __be32 status = nfserr_bad_stateid;
3629 struct nfs4_delegation *deleg;
3630
3631 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
3632 if (deleg == NULL)
3633 goto out;
3634 flags = share_access_to_flags(open->op_share_access);
3635 status = nfs4_check_delegmode(deleg, flags);
3636 if (status) {
3637 nfs4_put_stid(&deleg->dl_stid);
3638 goto out;
3639 }
3640 *dp = deleg;
3641 out:
3642 if (!nfsd4_is_deleg_cur(open))
3643 return nfs_ok;
3644 if (status)
3645 return status;
3646 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3647 return nfs_ok;
3648 }
3649
3650 static struct nfs4_ol_stateid *
3651 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3652 {
3653 struct nfs4_ol_stateid *local, *ret = NULL;
3654 struct nfs4_openowner *oo = open->op_openowner;
3655
3656 spin_lock(&fp->fi_lock);
3657 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3658 /* ignore lock owners */
3659 if (local->st_stateowner->so_is_open_owner == 0)
3660 continue;
3661 if (local->st_stateowner == &oo->oo_owner) {
3662 ret = local;
3663 atomic_inc(&ret->st_stid.sc_count);
3664 break;
3665 }
3666 }
3667 spin_unlock(&fp->fi_lock);
3668 return ret;
3669 }
3670
3671 static inline int nfs4_access_to_access(u32 nfs4_access)
3672 {
3673 int flags = 0;
3674
3675 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3676 flags |= NFSD_MAY_READ;
3677 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3678 flags |= NFSD_MAY_WRITE;
3679 return flags;
3680 }
3681
3682 static inline __be32
3683 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3684 struct nfsd4_open *open)
3685 {
3686 struct iattr iattr = {
3687 .ia_valid = ATTR_SIZE,
3688 .ia_size = 0,
3689 };
3690 if (!open->op_truncate)
3691 return 0;
3692 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3693 return nfserr_inval;
3694 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3695 }
3696
3697 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3698 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
3699 struct nfsd4_open *open)
3700 {
3701 struct file *filp = NULL;
3702 __be32 status;
3703 int oflag = nfs4_access_to_omode(open->op_share_access);
3704 int access = nfs4_access_to_access(open->op_share_access);
3705 unsigned char old_access_bmap, old_deny_bmap;
3706
3707 spin_lock(&fp->fi_lock);
3708
3709 /*
3710 * Are we trying to set a deny mode that would conflict with
3711 * current access?
3712 */
3713 status = nfs4_file_check_deny(fp, open->op_share_deny);
3714 if (status != nfs_ok) {
3715 spin_unlock(&fp->fi_lock);
3716 goto out;
3717 }
3718
3719 /* set access to the file */
3720 status = nfs4_file_get_access(fp, open->op_share_access);
3721 if (status != nfs_ok) {
3722 spin_unlock(&fp->fi_lock);
3723 goto out;
3724 }
3725
3726 /* Set access bits in stateid */
3727 old_access_bmap = stp->st_access_bmap;
3728 set_access(open->op_share_access, stp);
3729
3730 /* Set new deny mask */
3731 old_deny_bmap = stp->st_deny_bmap;
3732 set_deny(open->op_share_deny, stp);
3733 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3734
3735 if (!fp->fi_fds[oflag]) {
3736 spin_unlock(&fp->fi_lock);
3737 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
3738 if (status)
3739 goto out_put_access;
3740 spin_lock(&fp->fi_lock);
3741 if (!fp->fi_fds[oflag]) {
3742 fp->fi_fds[oflag] = filp;
3743 filp = NULL;
3744 }
3745 }
3746 spin_unlock(&fp->fi_lock);
3747 if (filp)
3748 fput(filp);
3749
3750 status = nfsd4_truncate(rqstp, cur_fh, open);
3751 if (status)
3752 goto out_put_access;
3753 out:
3754 return status;
3755 out_put_access:
3756 stp->st_access_bmap = old_access_bmap;
3757 nfs4_file_put_access(fp, open->op_share_access);
3758 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
3759 goto out;
3760 }
3761
3762 static __be32
3763 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
3764 {
3765 __be32 status;
3766 unsigned char old_deny_bmap;
3767
3768 if (!test_access(open->op_share_access, stp))
3769 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
3770
3771 /* test and set deny mode */
3772 spin_lock(&fp->fi_lock);
3773 status = nfs4_file_check_deny(fp, open->op_share_deny);
3774 if (status == nfs_ok) {
3775 old_deny_bmap = stp->st_deny_bmap;
3776 set_deny(open->op_share_deny, stp);
3777 fp->fi_share_deny |=
3778 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3779 }
3780 spin_unlock(&fp->fi_lock);
3781
3782 if (status != nfs_ok)
3783 return status;
3784
3785 status = nfsd4_truncate(rqstp, cur_fh, open);
3786 if (status != nfs_ok)
3787 reset_union_bmap_deny(old_deny_bmap, stp);
3788 return status;
3789 }
3790
3791 static void
3792 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
3793 {
3794 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3795 }
3796
3797 /* Should we give out recallable state? */
3798 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3799 {
3800 if (clp->cl_cb_state == NFSD4_CB_UP)
3801 return true;
3802 /*
3803 * In the sessions case, since we don't have to establish a
3804 * separate connection for callbacks, we assume it's OK
3805 * until we hear otherwise:
3806 */
3807 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3808 }
3809
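/*
 * The delegation lease allocated below covers the whole file:
 * locks_alloc_lock() returns a zeroed lock, so fl_start stays 0 while
 * fl_end is set to OFFSET_MAX. Using the nfs4_file as fl_owner means
 * all delegations on a file share a single lease owner.
 */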
3810 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
3811 {
3812 struct file_lock *fl;
3813
3814 fl = locks_alloc_lock();
3815 if (!fl)
3816 return NULL;
3817 fl->fl_lmops = &nfsd_lease_mng_ops;
3818 fl->fl_flags = FL_DELEG;
3819 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
3820 fl->fl_end = OFFSET_MAX;
3821 fl->fl_owner = (fl_owner_t)fp;
3822 fl->fl_pid = current->tgid;
3823 return fl;
3824 }
3825
3826 static int nfs4_setlease(struct nfs4_delegation *dp)
3827 {
3828 struct nfs4_file *fp = dp->dl_stid.sc_file;
3829 struct file_lock *fl;
3830 struct file *filp;
3831 int status = 0;
3832
3833 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
3834 if (!fl)
3835 return -ENOMEM;
3836 filp = find_readable_file(fp);
3837 if (!filp) {
3838 /* We should always have a readable file here */
3839 WARN_ON_ONCE(1);
locks_free_lock(fl);
3840 return -EBADF;
3841 }
3842 fl->fl_file = filp;
3843 /* vfs_setlease() may consume fl, clearing it; any leftover is freed below */
3844 status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
3845 if (fl)
3846 locks_free_lock(fl);
3847 if (status)
3848 goto out_fput;
3849 spin_lock(&state_lock);
3850 spin_lock(&fp->fi_lock);
3851 /* Did the lease get broken before we took the lock? */
3852 status = -EAGAIN;
3853 if (fp->fi_had_conflict)
3854 goto out_unlock;
3855 /* Race breaker */
3856 if (fp->fi_deleg_file) {
3857 status = 0;
3858 atomic_inc(&fp->fi_delegees);
3859 hash_delegation_locked(dp, fp);
3860 goto out_unlock;
3861 }
3862 fp->fi_deleg_file = filp;
3863 atomic_set(&fp->fi_delegees, 1);
3864 hash_delegation_locked(dp, fp);
3865 spin_unlock(&fp->fi_lock);
3866 spin_unlock(&state_lock);
3867 return 0;
3868 out_unlock:
3869 spin_unlock(&fp->fi_lock);
3870 spin_unlock(&state_lock);
3871 out_fput:
3872 fput(filp);
3873 return status;
3874 }
3875
3876 static struct nfs4_delegation *
3877 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3878 struct nfs4_file *fp)
3879 {
3880 int status;
3881 struct nfs4_delegation *dp;
3882
3883 if (fp->fi_had_conflict)
3884 return ERR_PTR(-EAGAIN);
3885
3886 dp = alloc_init_deleg(clp, fh);
3887 if (!dp)
3888 return ERR_PTR(-ENOMEM);
3889
3890 get_nfs4_file(fp);
3891 spin_lock(&state_lock);
3892 spin_lock(&fp->fi_lock);
3893 dp->dl_stid.sc_file = fp;
3894 if (!fp->fi_deleg_file) {
3895 spin_unlock(&fp->fi_lock);
3896 spin_unlock(&state_lock);
3897 status = nfs4_setlease(dp);
3898 goto out;
3899 }
3900 if (fp->fi_had_conflict) {
3901 status = -EAGAIN;
3902 goto out_unlock;
3903 }
3904 atomic_inc(&fp->fi_delegees);
3905 hash_delegation_locked(dp, fp);
3906 status = 0;
3907 out_unlock:
3908 spin_unlock(&fp->fi_lock);
3909 spin_unlock(&state_lock);
3910 out:
3911 if (status) {
3912 nfs4_put_stid(&dp->dl_stid);
3913 return ERR_PTR(status);
3914 }
3915 return dp;
3916 }
3917
3918 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3919 {
3920 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3921 if (status == -EAGAIN)
3922 open->op_why_no_deleg = WND4_CONTENTION;
3923 else {
3924 open->op_why_no_deleg = WND4_RESOURCE;
3925 switch (open->op_deleg_want) {
3926 case NFS4_SHARE_WANT_READ_DELEG:
3927 case NFS4_SHARE_WANT_WRITE_DELEG:
3928 case NFS4_SHARE_WANT_ANY_DELEG:
3929 break;
3930 case NFS4_SHARE_WANT_CANCEL:
3931 open->op_why_no_deleg = WND4_CANCELLED;
3932 break;
3933 case NFS4_SHARE_WANT_NO_DELEG:
3934 WARN_ON_ONCE(1);
3935 }
3936 }
3937 }
3938
3939 /*
3940 * Attempt to hand out a delegation.
3941 *
3942 * Note we don't support write delegations, and won't until the vfs has
3943 * proper support for them.
3944 */
3945 static void
3946 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
3947 struct nfs4_ol_stateid *stp)
3948 {
3949 struct nfs4_delegation *dp;
3950 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
3951 struct nfs4_client *clp = stp->st_stid.sc_client;
3952 int cb_up;
3953 int status = 0;
3954
3955 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3956 open->op_recall = 0;
3957 switch (open->op_claim_type) {
3958 case NFS4_OPEN_CLAIM_PREVIOUS:
3959 if (!cb_up)
3960 open->op_recall = 1;
3961 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
3962 goto out_no_deleg;
3963 break;
3964 case NFS4_OPEN_CLAIM_NULL:
3965 case NFS4_OPEN_CLAIM_FH:
3966 /*
3967 * Let's not give out any delegations till everyone's
3968 * had the chance to reclaim theirs....
3969 */
3970 if (locks_in_grace(clp->net))
3971 goto out_no_deleg;
3972 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3973 goto out_no_deleg;
3974 /*
3975 * Also, if the file was opened for write or
3976 * create, there's a good chance the client's
3977 * about to write to it, resulting in an
3978 * immediate recall (since we don't support
3979 * write delegations):
3980 */
3981 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3982 goto out_no_deleg;
3983 if (open->op_create == NFS4_OPEN_CREATE)
3984 goto out_no_deleg;
3985 break;
3986 default:
3987 goto out_no_deleg;
3988 }
3989 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
3990 if (IS_ERR(dp)) {
status = PTR_ERR(dp);
3991 goto out_no_deleg;
}
3992
3993 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3994
3995 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3996 STATEID_VAL(&dp->dl_stid.sc_stateid));
3997 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3998 nfs4_put_stid(&dp->dl_stid);
3999 return;
4000 out_no_deleg:
4001 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4002 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4003 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4004 open->op_recall = 1;
4005 }
4006 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4007
4008 /* 4.1 client asking for a delegation? */
4009 if (open->op_deleg_want)
4010 nfsd4_open_deleg_none_ext(open, status);
4011 return;
4012 }
4013
4014 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4015 struct nfs4_delegation *dp)
4016 {
4017 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4018 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4019 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4020 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4021 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4022 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4023 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4024 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4025 }
4026 /* Otherwise the client must be confused wanting a delegation
4027 * it already has; in that case we return neither
4028 * NFS4_OPEN_DELEGATE_NONE_EXT nor a reason.
4029 */
4030 }
4031
4032 __be32
4033 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4034 {
4035 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4036 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4037 struct nfs4_file *fp = NULL;
4038 struct nfs4_ol_stateid *stp = NULL;
4039 struct nfs4_delegation *dp = NULL;
4040 __be32 status;
4041
4042 /*
4043 * Lookup file; if found, lookup stateid and check open request,
4044 * and check for delegations in the process of being recalled.
4045 * If not found, create the nfs4_file struct
4046 */
4047 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
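/*
 * find_or_add_file() returns an existing hashed nfs4_file if there is
 * one; fp == open->op_file means our preallocated entry was inserted,
 * i.e. this is the first open of the file.
 */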
4048 if (fp != open->op_file) {
4049 status = nfs4_check_deleg(cl, open, &dp);
4050 if (status)
4051 goto out;
4052 stp = nfsd4_find_existing_open(fp, open);
4053 } else {
4054 open->op_file = NULL;
4055 status = nfserr_bad_stateid;
4056 if (nfsd4_is_deleg_cur(open))
4057 goto out;
4058 status = nfserr_jukebox;
4059 }
4060
4061 /*
4062 * OPEN the file, or upgrade an existing OPEN.
4063 * If truncate fails, the OPEN fails.
4064 */
4065 if (stp) {
4066 /* Stateid was found, this is an OPEN upgrade */
4067 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4068 if (status)
4069 goto out;
4070 } else {
4071 stp = open->op_stp;
4072 open->op_stp = NULL;
4073 init_open_stateid(stp, fp, open);
4074 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4075 if (status) {
4076 release_open_stateid(stp);
4077 goto out;
4078 }
4079 }
4080 update_stateid(&stp->st_stid.sc_stateid);
4081 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4082
4083 if (nfsd4_has_session(&resp->cstate)) {
4084 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4085 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4086 open->op_why_no_deleg = WND4_NOT_WANTED;
4087 goto nodeleg;
4088 }
4089 }
4090
4091 /*
4092 * Attempt to hand out a delegation. No error return, because the
4093 * OPEN succeeds even if we fail.
4094 */
4095 nfs4_open_delegation(current_fh, open, stp);
4096 nodeleg:
4097 status = nfs_ok;
4098
4099 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4100 STATEID_VAL(&stp->st_stid.sc_stateid));
4101 out:
4102 /* 4.1 client trying to upgrade/downgrade delegation? */
4103 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4104 open->op_deleg_want)
4105 nfsd4_deleg_xgrade_none_ext(open, dp);
4106
4107 if (fp)
4108 put_nfs4_file(fp);
4109 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4110 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
4111 /*
4112 * To finish the open response, we just need to set the rflags.
4113 */
4114 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4115 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
4116 !nfsd4_has_session(&resp->cstate))
4117 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4118 if (dp)
4119 nfs4_put_stid(&dp->dl_stid);
4120 if (stp)
4121 nfs4_put_stid(&stp->st_stid);
4122
4123 return status;
4124 }
4125
4126 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4127 struct nfsd4_open *open, __be32 status)
4128 {
4129 if (open->op_openowner) {
4130 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4131
4132 nfsd4_cstate_assign_replay(cstate, so);
4133 nfs4_put_stateowner(so);
4134 }
4135 if (open->op_file)
4136 kmem_cache_free(file_slab, open->op_file);
4137 if (open->op_stp)
4138 nfs4_put_stid(&open->op_stp->st_stid);
4139 }
4140
4141 __be32
4142 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4143 clientid_t *clid)
4144 {
4145 struct nfs4_client *clp;
4146 __be32 status;
4147 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4148
4149 dprintk("process_renew(%08x/%08x): starting\n",
4150 clid->cl_boot, clid->cl_id);
4151 status = lookup_clientid(clid, cstate, nn);
4152 if (status)
4153 goto out;
4154 clp = cstate->clp;
4155 status = nfserr_cb_path_down;
4156 if (!list_empty(&clp->cl_delegations)
4157 && clp->cl_cb_state != NFSD4_CB_UP)
4158 goto out;
4159 status = nfs_ok;
4160 out:
4161 return status;
4162 }
4163
4164 void
4165 nfsd4_end_grace(struct nfsd_net *nn)
4166 {
4167 /* do nothing if grace period already ended */
4168 if (nn->grace_ended)
4169 return;
4170
4171 dprintk("NFSD: end of grace period\n");
4172 nn->grace_ended = true;
4173 /*
4174 * If the server goes down again right now, an NFSv4
4175 * client will still be allowed to reclaim after it comes back up,
4176 * even if it hasn't yet had a chance to reclaim state this time.
4177 *
4178 */
4179 nfsd4_record_grace_done(nn);
4180 /*
4181 * At this point, NFSv4 clients can still reclaim. But if the
4182 * server crashes, any that have not yet reclaimed will be out
4183 * of luck on the next boot.
4184 *
4185 * (NFSv4.1+ clients are considered to have reclaimed once they
4186 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4187 * have reclaimed after their first OPEN.)
4188 */
4189 locks_end_grace(&nn->nfsd4_manager);
4190 /*
4191 * At this point, and once lockd and/or any other containers
4192 * exit their grace period, further reclaims will fail and
4193 * regular locking can resume.
4194 */
4195 }
4196
4197 static time_t
4198 nfs4_laundromat(struct nfsd_net *nn)
4199 {
4200 struct nfs4_client *clp;
4201 struct nfs4_openowner *oo;
4202 struct nfs4_delegation *dp;
4203 struct nfs4_ol_stateid *stp;
4204 struct list_head *pos, *next, reaplist;
4205 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4206 time_t t, new_timeo = nn->nfsd4_lease;
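/* new_timeo becomes the delay, in seconds, until the next laundromat run */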
4207
4208 dprintk("NFSD: laundromat service - starting\n");
4209 nfsd4_end_grace(nn);
4210 INIT_LIST_HEAD(&reaplist);
4211 spin_lock(&nn->client_lock);
4212 list_for_each_safe(pos, next, &nn->client_lru) {
4213 clp = list_entry(pos, struct nfs4_client, cl_lru);
4214 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4215 t = clp->cl_time - cutoff;
4216 new_timeo = min(new_timeo, t);
4217 break;
4218 }
4219 if (mark_client_expired_locked(clp)) {
4220 dprintk("NFSD: client in use (clientid %08x)\n",
4221 clp->cl_clientid.cl_id);
4222 continue;
4223 }
4224 list_add(&clp->cl_lru, &reaplist);
4225 }
4226 spin_unlock(&nn->client_lock);
4227 list_for_each_safe(pos, next, &reaplist) {
4228 clp = list_entry(pos, struct nfs4_client, cl_lru);
4229 dprintk("NFSD: purging unused client (clientid %08x)\n",
4230 clp->cl_clientid.cl_id);
4231 list_del_init(&clp->cl_lru);
4232 expire_client(clp);
4233 }
4234 spin_lock(&state_lock);
4235 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4236 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4237 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
4238 continue;
4239 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4240 t = dp->dl_time - cutoff;
4241 new_timeo = min(new_timeo, t);
4242 break;
4243 }
4244 unhash_delegation_locked(dp);
4245 list_add(&dp->dl_recall_lru, &reaplist);
4246 }
4247 spin_unlock(&state_lock);
4248 while (!list_empty(&reaplist)) {
4249 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4250 dl_recall_lru);
4251 list_del_init(&dp->dl_recall_lru);
4252 revoke_delegation(dp);
4253 }
4254
4255 spin_lock(&nn->client_lock);
4256 while (!list_empty(&nn->close_lru)) {
4257 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4258 oo_close_lru);
4259 if (time_after((unsigned long)oo->oo_time,
4260 (unsigned long)cutoff)) {
4261 t = oo->oo_time - cutoff;
4262 new_timeo = min(new_timeo, t);
4263 break;
4264 }
4265 list_del_init(&oo->oo_close_lru);
4266 stp = oo->oo_last_closed_stid;
4267 oo->oo_last_closed_stid = NULL;
4268 spin_unlock(&nn->client_lock);
4269 nfs4_put_stid(&stp->st_stid);
4270 spin_lock(&nn->client_lock);
4271 }
4272 spin_unlock(&nn->client_lock);
4273
4274 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4275 return new_timeo;
4276 }
4277
4278 static struct workqueue_struct *laundry_wq;
4279 static void laundromat_main(struct work_struct *);
4280
4281 static void
4282 laundromat_main(struct work_struct *laundry)
4283 {
4284 time_t t;
4285 struct delayed_work *dwork = container_of(laundry, struct delayed_work,
4286 work);
4287 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4288 laundromat_work);
4289
4290 t = nfs4_laundromat(nn);
4291 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4292 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4293 }
4294
4295 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
4296 {
4297 if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
4298 return nfserr_bad_stateid;
4299 return nfs_ok;
4300 }
4301
4302 static inline int
4303 access_permit_read(struct nfs4_ol_stateid *stp)
4304 {
4305 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4306 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4307 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4308 }
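/*
 * Note that WRITE-only access also permits READ here. The protocol
 * leaves the server latitude to allow READs on write-only opens,
 * presumably because clients may need read-modify-write cycles for
 * partial-page writes.
 */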
4309
4310 static inline int
4311 access_permit_write(struct nfs4_ol_stateid *stp)
4312 {
4313 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4314 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4315 }
4316
4317 static
4318 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4319 {
4320 __be32 status = nfserr_openmode;
4321
4322 /* For lock stateid's, we test the parent open, not the lock: */
4323 if (stp->st_openstp)
4324 stp = stp->st_openstp;
4325 if ((flags & WR_STATE) && !access_permit_write(stp))
4326 goto out;
4327 if ((flags & RD_STATE) && !access_permit_read(stp))
4328 goto out;
4329 status = nfs_ok;
4330 out:
4331 return status;
4332 }
4333
4334 static inline __be32
4335 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4336 {
4337 if (ONE_STATEID(stateid) && (flags & RD_STATE))
4338 return nfs_ok;
4339 else if (locks_in_grace(net)) {
4340 /* Answer in remaining cases depends on existence of
4341 * conflicting state; so we must wait out the grace period. */
4342 return nfserr_grace;
4343 } else if (flags & WR_STATE)
4344 return nfs4_share_conflict(current_fh,
4345 NFS4_SHARE_DENY_WRITE);
4346 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4347 return nfs4_share_conflict(current_fh,
4348 NFS4_SHARE_DENY_READ);
4349 }
4350
4351 /*
4352 * Allow READ/WRITE during grace period on recovered state only for files
4353 * that are not able to provide mandatory locking.
4354 */
4355 static inline int
4356 grace_disallows_io(struct net *net, struct inode *inode)
4357 {
4358 return locks_in_grace(net) && mandatory_lock(inode);
4359 }
4360
4361 /* Returns true iff a is later than b: */
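/*
 * Serial-number arithmetic, so the comparison survives u32 wraparound:
 * e.g. with a->si_generation == 1 and b->si_generation == ~0U,
 * (s32)(1 - ~0U) == 2 > 0, so a is still "later" than b.
 */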
4362 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
4363 {
4364 return (s32)(a->si_generation - b->si_generation) > 0;
4365 }
4366
4367 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4368 {
4369 /*
4370 * When sessions are used the stateid generation number is ignored
4371 * when it is zero.
4372 */
4373 if (has_session && in->si_generation == 0)
4374 return nfs_ok;
4375
4376 if (in->si_generation == ref->si_generation)
4377 return nfs_ok;
4378
4379 /* If the client sends us a stateid from the future, it's buggy: */
4380 if (stateid_generation_after(in, ref))
4381 return nfserr_bad_stateid;
4382 /*
4383 * However, we could see a stateid from the past, even from a
4384 * non-buggy client. For example, if the client sends a lock
4385 * while some IO is outstanding, the lock may bump si_generation
4386 * while the IO is still in flight. The client could avoid that
4387 * situation by waiting for responses on all the IO requests,
4388 * but better performance may result in retrying IO that
4389 * receives an old_stateid error if requests are rarely
4390 * reordered in flight:
4391 */
4392 return nfserr_old_stateid;
4393 }
4394
4395 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4396 {
4397 struct nfs4_stid *s;
4398 struct nfs4_ol_stateid *ols;
4399 __be32 status = nfserr_bad_stateid;
4400
4401 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4402 return status;
4403 /* Client debugging aid. */
4404 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4405 char addr_str[INET6_ADDRSTRLEN];
4406 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4407 sizeof(addr_str));
4408 pr_warn_ratelimited("NFSD: client %s testing state ID "
4409 "with incorrect client ID\n", addr_str);
4410 return status;
4411 }
4412 spin_lock(&cl->cl_lock);
4413 s = find_stateid_locked(cl, stateid);
4414 if (!s)
4415 goto out_unlock;
4416 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
4417 if (status)
4418 goto out_unlock;
4419 switch (s->sc_type) {
4420 case NFS4_DELEG_STID:
4421 status = nfs_ok;
4422 break;
4423 case NFS4_REVOKED_DELEG_STID:
4424 status = nfserr_deleg_revoked;
4425 break;
4426 case NFS4_OPEN_STID:
4427 case NFS4_LOCK_STID:
4428 ols = openlockstateid(s);
4429 if (ols->st_stateowner->so_is_open_owner
4430 && !(openowner(ols->st_stateowner)->oo_flags
4431 & NFS4_OO_CONFIRMED))
4432 status = nfserr_bad_stateid;
4433 else
4434 status = nfs_ok;
4435 break;
4436 default:
4437 printk("unknown stateid type %x\n", s->sc_type);
4438 /* Fallthrough */
4439 case NFS4_CLOSED_STID:
4440 case NFS4_CLOSED_DELEG_STID:
4441 status = nfserr_bad_stateid;
4442 }
4443 out_unlock:
4444 spin_unlock(&cl->cl_lock);
4445 return status;
4446 }
4447
4448 static __be32
4449 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4450 stateid_t *stateid, unsigned char typemask,
4451 struct nfs4_stid **s, struct nfsd_net *nn)
4452 {
4453 __be32 status;
4454
4455 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4456 return nfserr_bad_stateid;
4457 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4458 if (status == nfserr_stale_clientid) {
4459 if (cstate->session)
4460 return nfserr_bad_stateid;
4461 return nfserr_stale_stateid;
4462 }
4463 if (status)
4464 return status;
4465 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
4466 if (!*s)
4467 return nfserr_bad_stateid;
4468 return nfs_ok;
4469 }
4470
4471 /*
4472 * Checks for stateid operations
4473 */
4474 __be32
4475 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
4476 stateid_t *stateid, int flags, struct file **filpp)
4477 {
4478 struct nfs4_stid *s;
4479 struct nfs4_ol_stateid *stp = NULL;
4480 struct nfs4_delegation *dp = NULL;
4481 struct svc_fh *current_fh = &cstate->current_fh;
4482 struct inode *ino = current_fh->fh_dentry->d_inode;
4483 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4484 struct file *file = NULL;
4485 __be32 status;
4486
4487 if (filpp)
4488 *filpp = NULL;
4489
4490 if (grace_disallows_io(net, ino))
4491 return nfserr_grace;
4492
4493 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4494 return check_special_stateids(net, current_fh, stateid, flags);
4495
4496 status = nfsd4_lookup_stateid(cstate, stateid,
4497 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
4498 &s, nn);
4499 if (status)
4500 return status;
4501 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
4502 if (status)
4503 goto out;
4504 switch (s->sc_type) {
4505 case NFS4_DELEG_STID:
4506 dp = delegstateid(s);
4507 status = nfs4_check_delegmode(dp, flags);
4508 if (status)
4509 goto out;
4510 if (filpp) {
4511 file = dp->dl_stid.sc_file->fi_deleg_file;
4512 if (!file) {
4513 WARN_ON_ONCE(1);
4514 status = nfserr_serverfault;
4515 goto out;
4516 }
4517 get_file(file);
4518 }
4519 break;
4520 case NFS4_OPEN_STID:
4521 case NFS4_LOCK_STID:
4522 stp = openlockstateid(s);
4523 status = nfs4_check_fh(current_fh, stp);
4524 if (status)
4525 goto out;
4526 if (stp->st_stateowner->so_is_open_owner
4527 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4528 goto out;
4529 status = nfs4_check_openmode(stp, flags);
4530 if (status)
4531 goto out;
4532 if (filpp) {
4533 struct nfs4_file *fp = stp->st_stid.sc_file;
4534
4535 if (flags & RD_STATE)
4536 file = find_readable_file(fp);
4537 else
4538 file = find_writeable_file(fp);
4539 }
4540 break;
4541 default:
4542 status = nfserr_bad_stateid;
4543 goto out;
4544 }
4545 status = nfs_ok;
4546 if (file)
4547 *filpp = file;
4548 out:
4549 nfs4_put_stid(s);
4550 return status;
4551 }
4552
4553 /*
4554 * Test if the stateid is valid
4555 */
4556 __be32
4557 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4558 struct nfsd4_test_stateid *test_stateid)
4559 {
4560 struct nfsd4_test_stateid_id *stateid;
4561 struct nfs4_client *cl = cstate->session->se_client;
4562
4563 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4564 stateid->ts_id_status =
4565 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4566
4567 return nfs_ok;
4568 }
4569
4570 __be32
4571 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4572 struct nfsd4_free_stateid *free_stateid)
4573 {
4574 stateid_t *stateid = &free_stateid->fr_stateid;
4575 struct nfs4_stid *s;
4576 struct nfs4_delegation *dp;
4577 struct nfs4_ol_stateid *stp;
4578 struct nfs4_client *cl = cstate->session->se_client;
4579 __be32 ret = nfserr_bad_stateid;
4580
4581 spin_lock(&cl->cl_lock);
4582 s = find_stateid_locked(cl, stateid);
4583 if (!s)
4584 goto out_unlock;
4585 switch (s->sc_type) {
4586 case NFS4_DELEG_STID:
4587 ret = nfserr_locks_held;
4588 break;
4589 case NFS4_OPEN_STID:
4590 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4591 if (ret)
4592 break;
4593 ret = nfserr_locks_held;
4594 break;
4595 case NFS4_LOCK_STID:
4596 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4597 if (ret)
4598 break;
4599 stp = openlockstateid(s);
4600 ret = nfserr_locks_held;
4601 if (check_for_locks(stp->st_stid.sc_file,
4602 lockowner(stp->st_stateowner)))
4603 break;
4604 unhash_lock_stateid(stp);
4605 spin_unlock(&cl->cl_lock);
4606 nfs4_put_stid(s);
4607 ret = nfs_ok;
4608 goto out;
4609 case NFS4_REVOKED_DELEG_STID:
4610 dp = delegstateid(s);
4611 list_del_init(&dp->dl_recall_lru);
4612 spin_unlock(&cl->cl_lock);
4613 nfs4_put_stid(s);
4614 ret = nfs_ok;
4615 goto out;
4616 /* Default falls through and returns nfserr_bad_stateid */
4617 }
4618 out_unlock:
4619 spin_unlock(&cl->cl_lock);
4620 out:
4621 return ret;
4622 }
4623
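/* Map an NFSv4 lock type to the open access mode required to set it */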
4624 static inline int
4625 setlkflg(int type)
4626 {
4627 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
4628 RD_STATE : WR_STATE;
4629 }
4630
4631 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
4632 {
4633 struct svc_fh *current_fh = &cstate->current_fh;
4634 struct nfs4_stateowner *sop = stp->st_stateowner;
4635 __be32 status;
4636
4637 status = nfsd4_check_seqid(cstate, sop, seqid);
4638 if (status)
4639 return status;
4640 if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4641 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4642 /*
4643 * "Closed" stateid's exist *only* to return
4644 * nfserr_replay_me from the previous step, and
4645 * revoked delegations are kept only for free_stateid.
4646 */
4647 return nfserr_bad_stateid;
4648 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4649 if (status)
4650 return status;
4651 return nfs4_check_fh(current_fh, stp);
4652 }
4653
4654 /*
4655 * Checks for sequence id mutating operations.
4656 */
4657 static __be32
4658 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4659 stateid_t *stateid, char typemask,
4660 struct nfs4_ol_stateid **stpp,
4661 struct nfsd_net *nn)
4662 {
4663 __be32 status;
4664 struct nfs4_stid *s;
4665 struct nfs4_ol_stateid *stp = NULL;
4666
4667 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
4668 seqid, STATEID_VAL(stateid));
4669
4670 *stpp = NULL;
4671 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
4672 if (status)
4673 return status;
4674 stp = openlockstateid(s);
4675 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
4676
4677 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
4678 if (!status)
4679 *stpp = stp;
4680 else
4681 nfs4_put_stid(&stp->st_stid);
4682 return status;
4683 }
4684
4685 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4686 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
4687 {
4688 __be32 status;
4689 struct nfs4_openowner *oo;
4690 struct nfs4_ol_stateid *stp;
4691
4692 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
4693 NFS4_OPEN_STID, &stp, nn);
4694 if (status)
4695 return status;
4696 oo = openowner(stp->st_stateowner);
4697 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4698 nfs4_put_stid(&stp->st_stid);
4699 return nfserr_bad_stateid;
4700 }
4701 *stpp = stp;
4702 return nfs_ok;
4703 }
4704
4705 __be32
4706 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4707 struct nfsd4_open_confirm *oc)
4708 {
4709 __be32 status;
4710 struct nfs4_openowner *oo;
4711 struct nfs4_ol_stateid *stp;
4712 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4713
4714 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
4715 cstate->current_fh.fh_dentry);
4716
4717 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
4718 if (status)
4719 return status;
4720
4721 status = nfs4_preprocess_seqid_op(cstate,
4722 oc->oc_seqid, &oc->oc_req_stateid,
4723 NFS4_OPEN_STID, &stp, nn);
4724 if (status)
4725 goto out;
4726 oo = openowner(stp->st_stateowner);
4727 status = nfserr_bad_stateid;
4728 if (oo->oo_flags & NFS4_OO_CONFIRMED)
4729 goto put_stateid;
4730 oo->oo_flags |= NFS4_OO_CONFIRMED;
4731 update_stateid(&stp->st_stid.sc_stateid);
4732 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4733 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4734 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4735
4736 nfsd4_client_record_create(oo->oo_owner.so_client);
4737 status = nfs_ok;
4738 put_stateid:
4739 nfs4_put_stid(&stp->st_stid);
4740 out:
4741 nfsd4_bump_seqid(cstate, status);
4742 return status;
4743 }
4744
4745 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
4746 {
4747 if (!test_access(access, stp))
4748 return;
4749 nfs4_file_put_access(stp->st_stid.sc_file, access);
4750 clear_access(access, stp);
4751 }
4752
4753 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
4754 {
4755 switch (to_access) {
4756 case NFS4_SHARE_ACCESS_READ:
4757 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
4758 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4759 break;
4760 case NFS4_SHARE_ACCESS_WRITE:
4761 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
4762 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4763 break;
4764 case NFS4_SHARE_ACCESS_BOTH:
4765 break;
4766 default:
4767 WARN_ON_ONCE(1);
4768 }
4769 }
4770
4771 __be32
4772 nfsd4_open_downgrade(struct svc_rqst *rqstp,
4773 struct nfsd4_compound_state *cstate,
4774 struct nfsd4_open_downgrade *od)
4775 {
4776 __be32 status;
4777 struct nfs4_ol_stateid *stp;
4778 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4779
4780 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
4781 cstate->current_fh.fh_dentry);
4782
4783 /* We don't yet support WANT bits: */
4784 if (od->od_deleg_want)
4785 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
4786 od->od_deleg_want);
4787
4788 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
4789 &od->od_stateid, &stp, nn);
4790 if (status)
4791 goto out;
4792 status = nfserr_inval;
4793 if (!test_access(od->od_share_access, stp)) {
4794 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
4795 stp->st_access_bmap, od->od_share_access);
4796 goto put_stateid;
4797 }
4798 if (!test_deny(od->od_share_deny, stp)) {
4799 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
4800 stp->st_deny_bmap, od->od_share_deny);
4801 goto put_stateid;
4802 }
4803 nfs4_stateid_downgrade(stp, od->od_share_access);
4804
4805 reset_union_bmap_deny(od->od_share_deny, stp);
4806
4807 update_stateid(&stp->st_stid.sc_stateid);
4808 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4809 status = nfs_ok;
4810 put_stateid:
4811 nfs4_put_stid(&stp->st_stid);
4812 out:
4813 nfsd4_bump_seqid(cstate, status);
4814 return status;
4815 }
4816
4817 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
4818 {
4819 struct nfs4_client *clp = s->st_stid.sc_client;
4820 LIST_HEAD(reaplist);
4821
4822 s->st_stid.sc_type = NFS4_CLOSED_STID;
4823 spin_lock(&clp->cl_lock);
4824 unhash_open_stateid(s, &reaplist);
4825
4826 if (clp->cl_minorversion) {
4827 put_ol_stateid_locked(s, &reaplist);
4828 spin_unlock(&clp->cl_lock);
4829 free_ol_stateid_reaplist(&reaplist);
4830 } else {
4831 spin_unlock(&clp->cl_lock);
4832 free_ol_stateid_reaplist(&reaplist);
4833 move_to_close_lru(s, clp->net);
4834 }
4835 }
4836
4837 /*
4838 * nfs4_unlock_state() called after encode
4839 */
4840 __be32
4841 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4842 struct nfsd4_close *close)
4843 {
4844 __be32 status;
4845 struct nfs4_ol_stateid *stp;
4846 struct net *net = SVC_NET(rqstp);
4847 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4848
4849 dprintk("NFSD: nfsd4_close on file %pd\n",
4850 cstate->current_fh.fh_dentry);
4851
4852 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
4853 &close->cl_stateid,
4854 NFS4_OPEN_STID|NFS4_CLOSED_STID,
4855 &stp, nn);
4856 nfsd4_bump_seqid(cstate, status);
4857 if (status)
4858 goto out;
4859 update_stateid(&stp->st_stid.sc_stateid);
4860 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4861
4862 nfsd4_close_open_stateid(stp);
4863
4864 /* put reference from nfs4_preprocess_seqid_op */
4865 nfs4_put_stid(&stp->st_stid);
4866 out:
4867 return status;
4868 }
4869
4870 __be32
4871 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4872 struct nfsd4_delegreturn *dr)
4873 {
4874 struct nfs4_delegation *dp;
4875 stateid_t *stateid = &dr->dr_stateid;
4876 struct nfs4_stid *s;
4877 __be32 status;
4878 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4879
4880 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4881 return status;
4882
4883 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4884 if (status)
4885 goto out;
4886 dp = delegstateid(s);
4887 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4888 if (status)
4889 goto put_stateid;
4890
4891 destroy_delegation(dp);
4892 put_stateid:
4893 nfs4_put_stid(&dp->dl_stid);
4894 out:
4895 return status;
4896 }
4897
4898
4899 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
4900
4901 static inline u64
4902 end_offset(u64 start, u64 len)
4903 {
4904 u64 end;
4905
4906 end = start + len;
4907 return end >= start ? end : NFS4_MAX_UINT64;
4908 }
4909
4910 /* last octet in a range */
4911 static inline u64
4912 last_byte_offset(u64 start, u64 len)
4913 {
4914 u64 end;
4915
4916 WARN_ON_ONCE(!len);
4917 end = start + len;
4918 return end > start ? end - 1 : NFS4_MAX_UINT64;
4919 }
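/*
 * Example: start 0xfffffffffffffffc with len 8 would wrap, so both
 * helpers clamp the result to NFS4_MAX_UINT64; a len of all ones is
 * the protocol's "lock to end of file".
 */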
4920
4921 /*
4922 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4923 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4924 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
4925 * locking, this prevents us from being completely protocol-compliant. The
4926 * real solution to this problem is to start using unsigned file offsets in
4927 * the VFS, but this is a very deep change!
4928 */
4929 static inline void
4930 nfs4_transform_lock_offset(struct file_lock *lock)
4931 {
4932 if (lock->fl_start < 0)
4933 lock->fl_start = OFFSET_MAX;
4934 if (lock->fl_end < 0)
4935 lock->fl_end = OFFSET_MAX;
4936 }
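/*
 * Example: a lock at offset 0x8000000000000000 yields a negative
 * loff_t after conversion and is clamped to OFFSET_MAX, so locks in
 * the upper half of the offset space collapse onto the last byte.
 */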
4937
4938 static void nfsd4_fl_get_owner(struct file_lock *dst, struct file_lock *src)
4939 {
4940 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)src->fl_owner;
4941 dst->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lo->lo_owner));
4942 }
4943
4944 static void nfsd4_fl_put_owner(struct file_lock *fl)
4945 {
4946 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
4947
4948 if (lo) {
4949 nfs4_put_stateowner(&lo->lo_owner);
4950 fl->fl_owner = NULL;
4951 }
4952 }
4953
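/*
 * lm_get_owner is called when the locks code copies a lock (e.g. the
 * conflicting-lock copy handed back for LOCK/LOCKT denials);
 * lm_put_owner when that copy is freed. Together they keep the
 * nfs4_lockowner pinned while any file_lock still points at it.
 */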
4954 static const struct lock_manager_operations nfsd_posix_mng_ops = {
4955 .lm_get_owner = nfsd4_fl_get_owner,
4956 .lm_put_owner = nfsd4_fl_put_owner,
4957 };
4958
4959 static inline void
4960 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
4961 {
4962 struct nfs4_lockowner *lo;
4963
4964 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4965 lo = (struct nfs4_lockowner *) fl->fl_owner;
4966 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
4967 lo->lo_owner.so_owner.len, GFP_KERNEL);
4968 if (!deny->ld_owner.data)
4969 /* We just don't care that much */
4970 goto nevermind;
4971 deny->ld_owner.len = lo->lo_owner.so_owner.len;
4972 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4973 } else {
4974 nevermind:
4975 deny->ld_owner.len = 0;
4976 deny->ld_owner.data = NULL;
4977 deny->ld_clientid.cl_boot = 0;
4978 deny->ld_clientid.cl_id = 0;
4979 }
4980 deny->ld_start = fl->fl_start;
4981 deny->ld_length = NFS4_MAX_UINT64;
4982 if (fl->fl_end != NFS4_MAX_UINT64)
4983 deny->ld_length = fl->fl_end - fl->fl_start + 1;
4984 deny->ld_type = NFS4_READ_LT;
4985 if (fl->fl_type != F_RDLCK)
4986 deny->ld_type = NFS4_WRITE_LT;
4987 }
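/*
 * Example: a conflicting lock over bytes 0-4095 (fl_end == 4095) is
 * reported as ld_start 0, ld_length 4096; the all-ones length value
 * means "to the end of the file" on the wire.
 */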
4988
4989 static struct nfs4_lockowner *
4990 find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
4991 struct nfs4_client *clp)
4992 {
4993 unsigned int strhashval = ownerstr_hashval(owner);
4994 struct nfs4_stateowner *so;
4995
4996 lockdep_assert_held(&clp->cl_lock);
4997
4998 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
4999 so_strhash) {
5000 if (so->so_is_open_owner)
5001 continue;
5002 if (same_owner_str(so, owner))
5003 return lockowner(nfs4_get_stateowner(so));
5004 }
5005 return NULL;
5006 }
5007
5008 static struct nfs4_lockowner *
5009 find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
5010 struct nfs4_client *clp)
5011 {
5012 struct nfs4_lockowner *lo;
5013
5014 spin_lock(&clp->cl_lock);
5015 lo = find_lockowner_str_locked(clid, owner, clp);
5016 spin_unlock(&clp->cl_lock);
5017 return lo;
5018 }
5019
5020 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5021 {
5022 unhash_lockowner_locked(lockowner(sop));
5023 }
5024
5025 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5026 {
5027 struct nfs4_lockowner *lo = lockowner(sop);
5028
5029 kmem_cache_free(lockowner_slab, lo);
5030 }
5031
5032 static const struct nfs4_stateowner_operations lockowner_ops = {
5033 .so_unhash = nfs4_unhash_lockowner,
5034 .so_free = nfs4_free_lockowner,
5035 };
5036
5037 /*
5038 * Alloc a lock owner structure.
5039 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5040 * occurred.
5041 *
5042 * strhashval = ownerstr_hashval
5043 */
5044 static struct nfs4_lockowner *
5045 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5046 struct nfs4_ol_stateid *open_stp,
5047 struct nfsd4_lock *lock)
5048 {
5049 struct nfs4_lockowner *lo, *ret;
5050
5051 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5052 if (!lo)
5053 return NULL;
5054 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5055 lo->lo_owner.so_is_open_owner = 0;
5056 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5057 lo->lo_owner.so_ops = &lockowner_ops;
5058 spin_lock(&clp->cl_lock);
5059 ret = find_lockowner_str_locked(&clp->cl_clientid,
5060 &lock->lk_new_owner, clp);
5061 if (ret == NULL) {
5062 list_add(&lo->lo_owner.so_strhash,
5063 &clp->cl_ownerstr_hashtbl[strhashval]);
5064 ret = lo;
5065 } else
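/* another thread hashed the same owner first: use theirs, free ours */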
5066 nfs4_free_lockowner(&lo->lo_owner);
5067 spin_unlock(&clp->cl_lock);
5068 return ret;
5069 }
5070
5071 static void
5072 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5073 struct nfs4_file *fp, struct inode *inode,
5074 struct nfs4_ol_stateid *open_stp)
5075 {
5076 struct nfs4_client *clp = lo->lo_owner.so_client;
5077
5078 lockdep_assert_held(&clp->cl_lock);
5079
5080 atomic_inc(&stp->st_stid.sc_count);
5081 stp->st_stid.sc_type = NFS4_LOCK_STID;
5082 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5083 get_nfs4_file(fp);
5084 stp->st_stid.sc_file = fp;
5085 stp->st_stid.sc_free = nfs4_free_lock_stateid;
5086 stp->st_access_bmap = 0;
5087 stp->st_deny_bmap = open_stp->st_deny_bmap;
5088 stp->st_openstp = open_stp;
5089 list_add(&stp->st_locks, &open_stp->st_locks);
5090 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5091 spin_lock(&fp->fi_lock);
5092 list_add(&stp->st_perfile, &fp->fi_stateids);
5093 spin_unlock(&fp->fi_lock);
5094 }
5095
5096 static struct nfs4_ol_stateid *
5097 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5098 {
5099 struct nfs4_ol_stateid *lst;
5100 struct nfs4_client *clp = lo->lo_owner.so_client;
5101
5102 lockdep_assert_held(&clp->cl_lock);
5103
5104 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5105 if (lst->st_stid.sc_file == fp) {
5106 atomic_inc(&lst->st_stid.sc_count);
5107 return lst;
5108 }
5109 }
5110 return NULL;
5111 }
5112
5113 static struct nfs4_ol_stateid *
5114 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5115 struct inode *inode, struct nfs4_ol_stateid *ost,
5116 bool *new)
5117 {
5118 struct nfs4_stid *ns = NULL;
5119 struct nfs4_ol_stateid *lst;
5120 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5121 struct nfs4_client *clp = oo->oo_owner.so_client;
5122
5123 spin_lock(&clp->cl_lock);
5124 lst = find_lock_stateid(lo, fi);
5125 if (lst == NULL) {
5126 spin_unlock(&clp->cl_lock);
5127 ns = nfs4_alloc_stid(clp, stateid_slab);
5128 if (ns == NULL)
5129 return NULL;
5130
5131 spin_lock(&clp->cl_lock);
5132 lst = find_lock_stateid(lo, fi);
5133 if (likely(!lst)) {
5134 lst = openlockstateid(ns);
5135 init_lock_stateid(lst, lo, fi, inode, ost);
5136 ns = NULL;
5137 *new = true;
5138 }
5139 }
5140 spin_unlock(&clp->cl_lock);
5141 if (ns)
5142 nfs4_put_stid(ns);
5143 return lst;
5144 }
5145
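/*
 * A length of zero is never valid; NFS4_MAX_UINT64 means "to the end
 * of the file" and is allowed even where offset + length would wrap.
 */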
5146 static int
5147 check_lock_length(u64 offset, u64 length)
5148 {
5149 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5150 LOFF_OVERFLOW(offset, length)));
5151 }
5152
5153 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5154 {
5155 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5156
5157 lockdep_assert_held(&fp->fi_lock);
5158
5159 if (test_access(access, lock_stp))
5160 return;
5161 __nfs4_file_get_access(fp, access);
5162 set_access(access, lock_stp);
5163 }
5164
5165 static __be32
5166 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5167 struct nfs4_ol_stateid *ost,
5168 struct nfsd4_lock *lock,
5169 struct nfs4_ol_stateid **lst, bool *new)
5170 {
5171 __be32 status;
5172 struct nfs4_file *fi = ost->st_stid.sc_file;
5173 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5174 struct nfs4_client *cl = oo->oo_owner.so_client;
5175 struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
5176 struct nfs4_lockowner *lo;
5177 unsigned int strhashval;
5178
5179 lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
5180 if (!lo) {
5181 strhashval = ownerstr_hashval(&lock->v.new.owner);
5182 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5183 if (lo == NULL)
5184 return nfserr_jukebox;
5185 } else {
5186 /* with an existing lockowner, seqids must be the same */
5187 status = nfserr_bad_seqid;
5188 if (!cstate->minorversion &&
5189 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5190 goto out;
5191 }
5192
5193 *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5194 if (*lst == NULL) {
5195 status = nfserr_jukebox;
5196 goto out;
5197 }
5198 status = nfs_ok;
5199 out:
5200 nfs4_put_stateowner(&lo->lo_owner);
5201 return status;
5202 }
5203
5204 /*
5205 * LOCK operation
5206 */
5207 __be32
5208 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5209 struct nfsd4_lock *lock)
5210 {
5211 struct nfs4_openowner *open_sop = NULL;
5212 struct nfs4_lockowner *lock_sop = NULL;
5213 struct nfs4_ol_stateid *lock_stp = NULL;
5214 struct nfs4_ol_stateid *open_stp = NULL;
5215 struct nfs4_file *fp;
5216 struct file *filp = NULL;
5217 struct file_lock *file_lock = NULL;
5218 struct file_lock *conflock = NULL;
5219 __be32 status = 0;
5220 int lkflg;
5221 int err;
5222 bool new = false;
5223 struct net *net = SVC_NET(rqstp);
5224 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5225
5226 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5227 (long long) lock->lk_offset,
5228 (long long) lock->lk_length);
5229
5230 if (check_lock_length(lock->lk_offset, lock->lk_length))
5231 return nfserr_inval;
5232
5233 if ((status = fh_verify(rqstp, &cstate->current_fh,
5234 S_IFREG, NFSD_MAY_LOCK))) {
5235 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5236 return status;
5237 }
5238
5239 if (lock->lk_is_new) {
5240 if (nfsd4_has_session(cstate))
5241 /* See rfc 5661 18.10.3: given clientid is ignored: */
5242 memcpy(&lock->v.new.clientid,
5243 &cstate->session->se_client->cl_clientid,
5244 sizeof(clientid_t));
5245
5246 status = nfserr_stale_clientid;
5247 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5248 goto out;
5249
5250 /* validate and update open stateid and open seqid */
5251 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5252 lock->lk_new_open_seqid,
5253 &lock->lk_new_open_stateid,
5254 &open_stp, nn);
5255 if (status)
5256 goto out;
5257 open_sop = openowner(open_stp->st_stateowner);
5258 status = nfserr_bad_stateid;
5259 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5260 &lock->v.new.clientid))
5261 goto out;
5262 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5263 &lock_stp, &new);
5264 } else {
5265 status = nfs4_preprocess_seqid_op(cstate,
5266 lock->lk_old_lock_seqid,
5267 &lock->lk_old_lock_stateid,
5268 NFS4_LOCK_STID, &lock_stp, nn);
5269 }
5270 if (status)
5271 goto out;
5272 lock_sop = lockowner(lock_stp->st_stateowner);
5273
5274 lkflg = setlkflg(lock->lk_type);
5275 status = nfs4_check_openmode(lock_stp, lkflg);
5276 if (status)
5277 goto out;
5278
5279 status = nfserr_grace;
5280 if (locks_in_grace(net) && !lock->lk_reclaim)
5281 goto out;
5282 status = nfserr_no_grace;
5283 if (!locks_in_grace(net) && lock->lk_reclaim)
5284 goto out;
5285
5286 file_lock = locks_alloc_lock();
5287 if (!file_lock) {
5288 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5289 status = nfserr_jukebox;
5290 goto out;
5291 }
5292
5293 fp = lock_stp->st_stid.sc_file;
5294 switch (lock->lk_type) {
5295 case NFS4_READ_LT:
5296 case NFS4_READW_LT:
5297 spin_lock(&fp->fi_lock);
5298 filp = find_readable_file_locked(fp);
5299 if (filp)
5300 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5301 spin_unlock(&fp->fi_lock);
5302 file_lock->fl_type = F_RDLCK;
5303 break;
5304 case NFS4_WRITE_LT:
5305 case NFS4_WRITEW_LT:
5306 spin_lock(&fp->fi_lock);
5307 filp = find_writeable_file_locked(fp);
5308 if (filp)
5309 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5310 spin_unlock(&fp->fi_lock);
5311 file_lock->fl_type = F_WRLCK;
5312 break;
5313 default:
5314 status = nfserr_inval;
5315 goto out;
5316 }
5317 if (!filp) {
5318 status = nfserr_openmode;
5319 goto out;
5320 }
5321
5322 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
5323 file_lock->fl_pid = current->tgid;
5324 file_lock->fl_file = filp;
5325 file_lock->fl_flags = FL_POSIX;
5326 file_lock->fl_lmops = &nfsd_posix_mng_ops;
5327 file_lock->fl_start = lock->lk_offset;
5328 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
5329 nfs4_transform_lock_offset(file_lock);
5330
5331 conflock = locks_alloc_lock();
5332 if (!conflock) {
5333 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5334 status = nfserr_jukebox;
5335 goto out;
5336 }
5337
5338 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
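/* map the -errno from vfs_lock_file() onto NFSv4 status codes */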
5339 switch (-err) {
5340 case 0: /* success! */
5341 update_stateid(&lock_stp->st_stid.sc_stateid);
5342 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
5343 sizeof(stateid_t));
5344 status = 0;
5345 break;
5346 case (EAGAIN): /* conflock holds conflicting lock */
5347 status = nfserr_denied;
5348 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5349 nfs4_set_lock_denied(conflock, &lock->lk_denied);
5350 break;
5351 case (EDEADLK):
5352 status = nfserr_deadlock;
5353 break;
5354 default:
5355 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
5356 status = nfserrno(err);
5357 break;
5358 }
5359 out:
5360 if (filp)
5361 fput(filp);
5362 if (lock_stp) {
5363 /* Bump seqid manually if the 4.0 replay owner is openowner */
5364 if (cstate->replay_owner &&
5365 cstate->replay_owner != &lock_sop->lo_owner &&
5366 seqid_mutating_err(ntohl(status)))
5367 lock_sop->lo_owner.so_seqid++;
5368
5369 /*
5370 * If this is a new, never-before-used stateid, and we are
5371 * returning an error, then just go ahead and release it.
5372 */
5373 if (status && new)
5374 release_lock_stateid(lock_stp);
5375
5376 nfs4_put_stid(&lock_stp->st_stid);
5377 }
5378 if (open_stp)
5379 nfs4_put_stid(&open_stp->st_stid);
5380 nfsd4_bump_seqid(cstate, status);
5381 if (file_lock)
5382 locks_free_lock(file_lock);
5383 if (conflock)
5384 locks_free_lock(conflock);
5385 return status;
5386 }
5387
5388 /*
5389 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
5390 * so we do a temporary open here just to get an open file to pass to
5391 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
5392 * inode operation.)
5393 */
5394 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
5395 {
5396 struct file *file;
5397 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
5398 if (!err) {
5399 err = nfserrno(vfs_test_lock(file, lock));
5400 nfsd_close(file);
5401 }
5402 return err;
5403 }
5404
5405 /*
5406 * LOCKT operation
5407 */
5408 __be32
5409 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5410 struct nfsd4_lockt *lockt)
5411 {
5412 struct file_lock *file_lock = NULL;
5413 struct nfs4_lockowner *lo = NULL;
5414 __be32 status;
5415 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5416
5417 if (locks_in_grace(SVC_NET(rqstp)))
5418 return nfserr_grace;
5419
5420 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
5421 return nfserr_inval;
5422
5423 if (!nfsd4_has_session(cstate)) {
5424 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
5425 if (status)
5426 goto out;
5427 }
5428
5429 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5430 goto out;
5431
5432 file_lock = locks_alloc_lock();
5433 if (!file_lock) {
5434 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5435 status = nfserr_jukebox;
5436 goto out;
5437 }
5438
5439 switch (lockt->lt_type) {
5440 case NFS4_READ_LT:
5441 case NFS4_READW_LT:
5442 file_lock->fl_type = F_RDLCK;
5443 break;
5444 case NFS4_WRITE_LT:
5445 case NFS4_WRITEW_LT:
5446 file_lock->fl_type = F_WRLCK;
5447 break;
5448 default:
5449 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
5450 status = nfserr_inval;
5451 goto out;
5452 }
5453
5454 lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
5455 cstate->clp);
5456 if (lo)
5457 file_lock->fl_owner = (fl_owner_t)lo;
5458 file_lock->fl_pid = current->tgid;
5459 file_lock->fl_flags = FL_POSIX;
5460
5461 file_lock->fl_start = lockt->lt_offset;
5462 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
5463
5464 nfs4_transform_lock_offset(file_lock);
5465
5466 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
5467 if (status)
5468 goto out;
5469
5470 if (file_lock->fl_type != F_UNLCK) {
5471 status = nfserr_denied;
5472 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
5473 }
5474 out:
5475 if (lo)
5476 nfs4_put_stateowner(&lo->lo_owner);
5477 if (file_lock)
5478 locks_free_lock(file_lock);
5479 return status;
5480 }
5481
5482 __be32
5483 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5484 struct nfsd4_locku *locku)
5485 {
5486 struct nfs4_ol_stateid *stp;
5487 struct file *filp = NULL;
5488 struct file_lock *file_lock = NULL;
5489 __be32 status;
5490 int err;
5491 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5492
5493 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
5494 (long long) locku->lu_offset,
5495 (long long) locku->lu_length);
5496
5497 if (check_lock_length(locku->lu_offset, locku->lu_length))
5498 return nfserr_inval;
5499
5500 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
5501 &locku->lu_stateid, NFS4_LOCK_STID,
5502 &stp, nn);
5503 if (status)
5504 goto out;
5505 filp = find_any_file(stp->st_stid.sc_file);
5506 if (!filp) {
5507 status = nfserr_lock_range;
5508 goto put_stateid;
5509 }
5510 file_lock = locks_alloc_lock();
5511 if (!file_lock) {
5512 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
5513 status = nfserr_jukebox;
5514 goto fput;
5515 }
5516
5517 file_lock->fl_type = F_UNLCK;
5518 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
5519 file_lock->fl_pid = current->tgid;
5520 file_lock->fl_file = filp;
5521 file_lock->fl_flags = FL_POSIX;
5522 file_lock->fl_lmops = &nfsd_posix_mng_ops;
5523 file_lock->fl_start = locku->lu_offset;
5524
5525 file_lock->fl_end = last_byte_offset(locku->lu_offset,
5526 locku->lu_length);
5527 nfs4_transform_lock_offset(file_lock);
5528
5529 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
5530 if (err) {
5531 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
5532 goto out_nfserr;
5533 }
5534 update_stateid(&stp->st_stid.sc_stateid);
5535 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
5536 fput:
5537 fput(filp);
5538 put_stateid:
5539 nfs4_put_stid(&stp->st_stid);
5540 out:
5541 nfsd4_bump_seqid(cstate, status);
5542 if (file_lock)
5543 locks_free_lock(file_lock);
5544 return status;
5545
5546 out_nfserr:
5547 status = nfserrno(err);
5548 goto fput;
5549 }
5550
5551 /*
5552 * returns
5553 * true: locks held by lockowner
5554 * false: no locks held by lockowner
5555 */
5556 static bool
5557 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
5558 {
5559 struct file_lock *fl;
5560 bool status = false;
5561 struct file *filp = find_any_file(fp);
5562 struct inode *inode;
5563 struct file_lock_context *flctx;
5564
5565 if (!filp) {
5566 /* Any valid lock stateid should have some sort of access */
5567 WARN_ON_ONCE(1);
5568 return status;
5569 }
5570
5571 inode = file_inode(filp);
5572 flctx = inode->i_flctx;
5573
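/*
 * Peek at the list locklessly first; list_empty_careful() is safe
 * without flc_lock, and we only take the lock if there may actually
 * be POSIX locks to walk.
 */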
5574 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
5575 spin_lock(&flctx->flc_lock);
5576 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
5577 if (fl->fl_owner == (fl_owner_t)lowner) {
5578 status = true;
5579 break;
5580 }
5581 }
5582 spin_unlock(&flctx->flc_lock);
5583 }
5584 fput(filp);
5585 return status;
5586 }
5587
5588 __be32
5589 nfsd4_release_lockowner(struct svc_rqst *rqstp,
5590 struct nfsd4_compound_state *cstate,
5591 struct nfsd4_release_lockowner *rlockowner)
5592 {
5593 clientid_t *clid = &rlockowner->rl_clientid;
5594 struct nfs4_stateowner *sop;
5595 struct nfs4_lockowner *lo = NULL;
5596 struct nfs4_ol_stateid *stp;
5597 struct xdr_netobj *owner = &rlockowner->rl_owner;
5598 unsigned int hashval = ownerstr_hashval(owner);
5599 __be32 status;
5600 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5601 struct nfs4_client *clp;
5602
5603 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
5604 clid->cl_boot, clid->cl_id);
5605
5606 status = lookup_clientid(clid, cstate, nn);
5607 if (status)
5608 return status;
5609
5610 clp = cstate->clp;
5611 /* Find the matching lock stateowner */
5612 spin_lock(&clp->cl_lock);
5613 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
5614 so_strhash) {
5615
5616 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
5617 continue;
5618
5619 /* see if there are still any locks associated with it */
5620 lo = lockowner(sop);
5621 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
5622 if (check_for_locks(stp->st_stid.sc_file, lo)) {
5623 status = nfserr_locks_held;
5624 spin_unlock(&clp->cl_lock);
5625 return status;
5626 }
5627 }
5628
5629 nfs4_get_stateowner(sop);
5630 break;
5631 }
5632 spin_unlock(&clp->cl_lock);
5633 if (lo)
5634 release_lockowner(lo);
5635 return status;
5636 }
5637
5638 static inline struct nfs4_client_reclaim *
5639 alloc_reclaim(void)
5640 {
5641 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
5642 }
5643
5644 bool
5645 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5646 {
5647 struct nfs4_client_reclaim *crp;
5648
5649 crp = nfsd4_find_reclaim_client(name, nn);
5650 return (crp && crp->cr_clp);
5651 }
5652
5653 /*
5654 * failure => all reset bets are off, nfserr_no_grace...
5655 */
5656 struct nfs4_client_reclaim *
5657 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
5658 {
5659 unsigned int strhashval;
5660 struct nfs4_client_reclaim *crp;
5661
5662 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
5663 crp = alloc_reclaim();
5664 if (crp) {
5665 strhashval = clientstr_hashval(name);
5666 INIT_LIST_HEAD(&crp->cr_strhash);
5667 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
5668 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
5669 crp->cr_clp = NULL;
5670 nn->reclaim_str_hashtbl_size++;
5671 }
5672 return crp;
5673 }
5674
5675 void
5676 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
5677 {
5678 list_del(&crp->cr_strhash);
5679 kfree(crp);
5680 nn->reclaim_str_hashtbl_size--;
5681 }
5682
5683 void
5684 nfs4_release_reclaim(struct nfsd_net *nn)
5685 {
5686 struct nfs4_client_reclaim *crp = NULL;
5687 int i;
5688
5689 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5690 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
5691 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
5692 struct nfs4_client_reclaim, cr_strhash);
5693 nfs4_remove_reclaim_record(crp, nn);
5694 }
5695 }
5696 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
5697 }
5698
5699 /*
5700 * Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
5701 struct nfs4_client_reclaim *
5702 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
5703 {
5704 unsigned int strhashval;
5705 struct nfs4_client_reclaim *crp = NULL;
5706
5707 	dprintk("NFSD: nfsd4_find_reclaim_client for recdir %s\n", recdir);
5708
5709 strhashval = clientstr_hashval(recdir);
5710 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
5711 if (same_name(crp->cr_recdir, recdir)) {
5712 return crp;
5713 }
5714 }
5715 return NULL;
5716 }
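
/*
 * Editor's note (hedged): "recdir" here is the client's recovery-directory
 * name, the same HEXDIR_LEN string stored by nfs4_client_to_reclaim() above;
 * in the legacy recoverydir tracker it is a hex-encoded digest derived from
 * the client identifier, so same_name() is a fixed-length compare.
 */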
5717
5718 /*
5719 * Called from OPEN. Look for clientid in reclaim list.
5720 */
5721 __be32
5722 nfs4_check_open_reclaim(clientid_t *clid,
5723 struct nfsd4_compound_state *cstate,
5724 struct nfsd_net *nn)
5725 {
5726 __be32 status;
5727
5728 /* find clientid in conf_id_hashtbl */
5729 status = lookup_clientid(clid, cstate, nn);
5730 if (status)
5731 return nfserr_reclaim_bad;
5732
5733 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
5734 return nfserr_no_grace;
5735
5736 if (nfsd4_client_record_check(cstate->clp))
5737 return nfserr_reclaim_bad;
5738
5739 return nfs_ok;
5740 }
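
/*
 * Example of the checks above (editor's sketch): a client that already
 * sent RECLAIM_COMPLETE has NFSD4_CLIENT_RECLAIM_COMPLETE set and so gets
 * nfserr_no_grace for any further CLAIM_PREVIOUS open; an unknown clientid
 * or a client with no recovery record gets nfserr_reclaim_bad.
 */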
5741
5742 #ifdef CONFIG_NFSD_FAULT_INJECTION
5743 static inline void
5744 put_client(struct nfs4_client *clp)
5745 {
5746 atomic_dec(&clp->cl_refcount);
5747 }
5748
5749 static struct nfs4_client *
5750 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
5751 {
5752 struct nfs4_client *clp;
5753 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5754 nfsd_net_id);
5755
5756 if (!nfsd_netns_ready(nn))
5757 return NULL;
5758
5759 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5760 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
5761 return clp;
5762 }
5763 return NULL;
5764 }
5765
5766 u64
5767 nfsd_inject_print_clients(void)
5768 {
5769 struct nfs4_client *clp;
5770 u64 count = 0;
5771 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5772 nfsd_net_id);
5773 char buf[INET6_ADDRSTRLEN];
5774
5775 if (!nfsd_netns_ready(nn))
5776 return 0;
5777
5778 spin_lock(&nn->client_lock);
5779 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5780 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5781 pr_info("NFS Client: %s\n", buf);
5782 ++count;
5783 }
5784 spin_unlock(&nn->client_lock);
5785
5786 return count;
5787 }
5788
5789 u64
5790 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
5791 {
5792 u64 count = 0;
5793 struct nfs4_client *clp;
5794 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5795 nfsd_net_id);
5796
5797 if (!nfsd_netns_ready(nn))
5798 return count;
5799
5800 spin_lock(&nn->client_lock);
5801 clp = nfsd_find_client(addr, addr_size);
5802 if (clp) {
5803 if (mark_client_expired_locked(clp) == nfs_ok)
5804 ++count;
5805 else
5806 clp = NULL;
5807 }
5808 spin_unlock(&nn->client_lock);
5809
5810 if (clp)
5811 expire_client(clp);
5812
5813 return count;
5814 }
5815
5816 u64
5817 nfsd_inject_forget_clients(u64 max)
5818 {
5819 u64 count = 0;
5820 struct nfs4_client *clp, *next;
5821 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5822 nfsd_net_id);
5823 LIST_HEAD(reaplist);
5824
5825 if (!nfsd_netns_ready(nn))
5826 return count;
5827
5828 spin_lock(&nn->client_lock);
5829 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
5830 if (mark_client_expired_locked(clp) == nfs_ok) {
5831 list_add(&clp->cl_lru, &reaplist);
5832 if (max != 0 && ++count >= max)
5833 break;
5834 }
5835 }
5836 spin_unlock(&nn->client_lock);
5837
5838 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
5839 expire_client(clp);
5840
5841 return count;
5842 }
5843
5844 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
5845 const char *type)
5846 {
5847 char buf[INET6_ADDRSTRLEN];
5848 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5849 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
5850 }
5851
5852 static void
5853 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
5854 struct list_head *collect)
5855 {
5856 struct nfs4_client *clp = lst->st_stid.sc_client;
5857 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5858 nfsd_net_id);
5859
5860 if (!collect)
5861 return;
5862
5863 lockdep_assert_held(&nn->client_lock);
5864 atomic_inc(&clp->cl_refcount);
5865 list_add(&lst->st_locks, collect);
5866 }
5867
5868 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
5869 struct list_head *collect,
5870 void (*func)(struct nfs4_ol_stateid *))
5871 {
5872 struct nfs4_openowner *oop;
5873 struct nfs4_ol_stateid *stp, *st_next;
5874 struct nfs4_ol_stateid *lst, *lst_next;
5875 u64 count = 0;
5876
5877 spin_lock(&clp->cl_lock);
5878 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
5879 list_for_each_entry_safe(stp, st_next,
5880 &oop->oo_owner.so_stateids, st_perstateowner) {
5881 list_for_each_entry_safe(lst, lst_next,
5882 &stp->st_locks, st_locks) {
5883 if (func) {
5884 func(lst);
5885 nfsd_inject_add_lock_to_list(lst,
5886 collect);
5887 }
5888 ++count;
5889 				/*
5890 				 * Although "count" here is a 64-bit
5891 				 * integer, every stateid we collect
5892 				 * also bumps clp->cl_refcount, which
5893 				 * is a plain int and must not overflow.
5894 				 * Warn once if we approach INT_MAX.
5895 				 */
5896 WARN_ON_ONCE(count == (INT_MAX / 2));
5897 if (count == max)
5898 goto out;
5899 }
5900 }
5901 }
5902 out:
5903 spin_unlock(&clp->cl_lock);
5904
5905 return count;
5906 }
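
/*
 * Structure note (editor's addition): the triple nesting above walks the
 * state hierarchy client -> openowner -> open stateid -> lock stateid, so
 * "count" is the number of lock stateids, which is what the callers below
 * report as "locked files" or collect for reaping.
 */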
5907
5908 static u64
5909 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
5910 u64 max)
5911 {
5912 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
5913 }
5914
5915 static u64
5916 nfsd_print_client_locks(struct nfs4_client *clp)
5917 {
5918 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
5919 nfsd_print_count(clp, count, "locked files");
5920 return count;
5921 }
5922
5923 u64
5924 nfsd_inject_print_locks(void)
5925 {
5926 struct nfs4_client *clp;
5927 u64 count = 0;
5928 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5929 nfsd_net_id);
5930
5931 if (!nfsd_netns_ready(nn))
5932 return 0;
5933
5934 spin_lock(&nn->client_lock);
5935 list_for_each_entry(clp, &nn->client_lru, cl_lru)
5936 count += nfsd_print_client_locks(clp);
5937 spin_unlock(&nn->client_lock);
5938
5939 return count;
5940 }
5941
5942 static void
5943 nfsd_reap_locks(struct list_head *reaplist)
5944 {
5945 struct nfs4_client *clp;
5946 struct nfs4_ol_stateid *stp, *next;
5947
5948 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
5949 list_del_init(&stp->st_locks);
5950 clp = stp->st_stid.sc_client;
5951 nfs4_put_stid(&stp->st_stid);
5952 put_client(clp);
5953 }
5954 }
5955
5956 u64
5957 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
5958 {
5959 	u64 count = 0;
5960 struct nfs4_client *clp;
5961 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5962 nfsd_net_id);
5963 LIST_HEAD(reaplist);
5964
5965 if (!nfsd_netns_ready(nn))
5966 return count;
5967
5968 spin_lock(&nn->client_lock);
5969 clp = nfsd_find_client(addr, addr_size);
5970 if (clp)
5971 count = nfsd_collect_client_locks(clp, &reaplist, 0);
5972 spin_unlock(&nn->client_lock);
5973 nfsd_reap_locks(&reaplist);
5974 return count;
5975 }
5976
5977 u64
5978 nfsd_inject_forget_locks(u64 max)
5979 {
5980 u64 count = 0;
5981 struct nfs4_client *clp;
5982 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5983 nfsd_net_id);
5984 LIST_HEAD(reaplist);
5985
5986 if (!nfsd_netns_ready(nn))
5987 return count;
5988
5989 spin_lock(&nn->client_lock);
5990 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5991 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
5992 if (max != 0 && count >= max)
5993 break;
5994 }
5995 spin_unlock(&nn->client_lock);
5996 nfsd_reap_locks(&reaplist);
5997 return count;
5998 }
5999
6000 static u64
6001 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6002 struct list_head *collect,
6003 void (*func)(struct nfs4_openowner *))
6004 {
6005 struct nfs4_openowner *oop, *next;
6006 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6007 nfsd_net_id);
6008 u64 count = 0;
6009
6010 lockdep_assert_held(&nn->client_lock);
6011
6012 spin_lock(&clp->cl_lock);
6013 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6014 if (func) {
6015 func(oop);
6016 if (collect) {
6017 atomic_inc(&clp->cl_refcount);
6018 list_add(&oop->oo_perclient, collect);
6019 }
6020 }
6021 ++count;
6022 		/*
6023 		 * Although "count" here is a 64-bit integer,
6024 		 * every openowner we collect also bumps
6025 		 * clp->cl_refcount, which is a plain int and
6026 		 * must not overflow. Warn once near INT_MAX.
6027 		 */
6028 WARN_ON_ONCE(count == (INT_MAX / 2));
6029 if (count == max)
6030 break;
6031 }
6032 spin_unlock(&clp->cl_lock);
6033
6034 return count;
6035 }
6036
6037 static u64
6038 nfsd_print_client_openowners(struct nfs4_client *clp)
6039 {
6040 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6041
6042 nfsd_print_count(clp, count, "openowners");
6043 return count;
6044 }
6045
6046 static u64
6047 nfsd_collect_client_openowners(struct nfs4_client *clp,
6048 struct list_head *collect, u64 max)
6049 {
6050 return nfsd_foreach_client_openowner(clp, max, collect,
6051 unhash_openowner_locked);
6052 }
6053
6054 u64
6055 nfsd_inject_print_openowners(void)
6056 {
6057 struct nfs4_client *clp;
6058 u64 count = 0;
6059 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6060 nfsd_net_id);
6061
6062 if (!nfsd_netns_ready(nn))
6063 return 0;
6064
6065 spin_lock(&nn->client_lock);
6066 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6067 count += nfsd_print_client_openowners(clp);
6068 spin_unlock(&nn->client_lock);
6069
6070 return count;
6071 }
6072
6073 static void
6074 nfsd_reap_openowners(struct list_head *reaplist)
6075 {
6076 struct nfs4_client *clp;
6077 struct nfs4_openowner *oop, *next;
6078
6079 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6080 list_del_init(&oop->oo_perclient);
6081 clp = oop->oo_owner.so_client;
6082 release_openowner(oop);
6083 put_client(clp);
6084 }
6085 }
6086
6087 u64
6088 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6089 size_t addr_size)
6090 {
6091 	u64 count = 0;
6092 struct nfs4_client *clp;
6093 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6094 nfsd_net_id);
6095 LIST_HEAD(reaplist);
6096
6097 if (!nfsd_netns_ready(nn))
6098 return count;
6099
6100 spin_lock(&nn->client_lock);
6101 clp = nfsd_find_client(addr, addr_size);
6102 if (clp)
6103 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6104 spin_unlock(&nn->client_lock);
6105 nfsd_reap_openowners(&reaplist);
6106 return count;
6107 }
6108
6109 u64
6110 nfsd_inject_forget_openowners(u64 max)
6111 {
6112 u64 count = 0;
6113 struct nfs4_client *clp;
6114 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6115 nfsd_net_id);
6116 LIST_HEAD(reaplist);
6117
6118 if (!nfsd_netns_ready(nn))
6119 return count;
6120
6121 spin_lock(&nn->client_lock);
6122 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6123 count += nfsd_collect_client_openowners(clp, &reaplist,
6124 max - count);
6125 if (max != 0 && count >= max)
6126 break;
6127 }
6128 spin_unlock(&nn->client_lock);
6129 nfsd_reap_openowners(&reaplist);
6130 return count;
6131 }
6132
6133 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6134 struct list_head *victims)
6135 {
6136 struct nfs4_delegation *dp, *next;
6137 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6138 nfsd_net_id);
6139 u64 count = 0;
6140
6141 lockdep_assert_held(&nn->client_lock);
6142
6143 spin_lock(&state_lock);
6144 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6145 if (victims) {
6146 /*
6147 * It's not safe to mess with delegations that have a
6148 * non-zero dl_time. They might have already been broken
6149 * and could be processed by the laundromat outside of
6150 * the state_lock. Just leave them be.
6151 */
6152 if (dp->dl_time != 0)
6153 continue;
6154
6155 atomic_inc(&clp->cl_refcount);
6156 unhash_delegation_locked(dp);
6157 list_add(&dp->dl_recall_lru, victims);
6158 }
6159 ++count;
6160 		/*
6161 		 * Although "count" here is a 64-bit integer,
6162 		 * every delegation we collect also bumps
6163 		 * clp->cl_refcount, which is a plain int and
6164 		 * must not overflow. Warn once near INT_MAX.
6165 		 */
6166 WARN_ON_ONCE(count == (INT_MAX / 2));
6167 if (count == max)
6168 break;
6169 }
6170 spin_unlock(&state_lock);
6171 return count;
6172 }
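
/*
 * Hypothetical timeline for the dl_time test above (editor's addition):
 * once a lease break sets dl_time and queues the delegation for recall,
 * the laundromat may already be revoking it without holding state_lock;
 * unhashing it here as well would tear the same delegation down twice,
 * so only dl_time == 0 entries are collected.
 */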
6173
6174 static u64
6175 nfsd_print_client_delegations(struct nfs4_client *clp)
6176 {
6177 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6178
6179 nfsd_print_count(clp, count, "delegations");
6180 return count;
6181 }
6182
6183 u64
6184 nfsd_inject_print_delegations(void)
6185 {
6186 struct nfs4_client *clp;
6187 u64 count = 0;
6188 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6189 nfsd_net_id);
6190
6191 if (!nfsd_netns_ready(nn))
6192 return 0;
6193
6194 spin_lock(&nn->client_lock);
6195 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6196 count += nfsd_print_client_delegations(clp);
6197 spin_unlock(&nn->client_lock);
6198
6199 return count;
6200 }
6201
6202 static void
6203 nfsd_forget_delegations(struct list_head *reaplist)
6204 {
6205 struct nfs4_client *clp;
6206 struct nfs4_delegation *dp, *next;
6207
6208 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6209 list_del_init(&dp->dl_recall_lru);
6210 clp = dp->dl_stid.sc_client;
6211 revoke_delegation(dp);
6212 put_client(clp);
6213 }
6214 }
6215
6216 u64
6217 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6218 size_t addr_size)
6219 {
6220 u64 count = 0;
6221 struct nfs4_client *clp;
6222 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6223 nfsd_net_id);
6224 LIST_HEAD(reaplist);
6225
6226 if (!nfsd_netns_ready(nn))
6227 return count;
6228
6229 spin_lock(&nn->client_lock);
6230 clp = nfsd_find_client(addr, addr_size);
6231 if (clp)
6232 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6233 spin_unlock(&nn->client_lock);
6234
6235 nfsd_forget_delegations(&reaplist);
6236 return count;
6237 }
6238
6239 u64
6240 nfsd_inject_forget_delegations(u64 max)
6241 {
6242 u64 count = 0;
6243 struct nfs4_client *clp;
6244 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6245 nfsd_net_id);
6246 LIST_HEAD(reaplist);
6247
6248 if (!nfsd_netns_ready(nn))
6249 return count;
6250
6251 spin_lock(&nn->client_lock);
6252 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6253 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6254 if (max != 0 && count >= max)
6255 break;
6256 }
6257 spin_unlock(&nn->client_lock);
6258 nfsd_forget_delegations(&reaplist);
6259 return count;
6260 }
6261
6262 static void
6263 nfsd_recall_delegations(struct list_head *reaplist)
6264 {
6265 struct nfs4_client *clp;
6266 struct nfs4_delegation *dp, *next;
6267
6268 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6269 list_del_init(&dp->dl_recall_lru);
6270 clp = dp->dl_stid.sc_client;
6271 /*
6272 * We skipped all entries that had a zero dl_time before,
6273 * so we can now reset the dl_time back to 0. If a delegation
6274 * break comes in now, then it won't make any difference since
6275 * we're recalling it either way.
6276 */
6277 spin_lock(&state_lock);
6278 dp->dl_time = 0;
6279 spin_unlock(&state_lock);
6280 nfsd_break_one_deleg(dp);
6281 put_client(clp);
6282 }
6283 }
6284
6285 u64
6286 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
6287 size_t addr_size)
6288 {
6289 u64 count = 0;
6290 struct nfs4_client *clp;
6291 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6292 nfsd_net_id);
6293 LIST_HEAD(reaplist);
6294
6295 if (!nfsd_netns_ready(nn))
6296 return count;
6297
6298 spin_lock(&nn->client_lock);
6299 clp = nfsd_find_client(addr, addr_size);
6300 if (clp)
6301 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6302 spin_unlock(&nn->client_lock);
6303
6304 nfsd_recall_delegations(&reaplist);
6305 return count;
6306 }
6307
6308 u64
6309 nfsd_inject_recall_delegations(u64 max)
6310 {
6311 u64 count = 0;
6312 struct nfs4_client *clp, *next;
6313 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6314 nfsd_net_id);
6315 LIST_HEAD(reaplist);
6316
6317 if (!nfsd_netns_ready(nn))
6318 return count;
6319
6320 spin_lock(&nn->client_lock);
6321 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6322 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6323 		if (max != 0 && count >= max)
6324 break;
6325 }
6326 spin_unlock(&nn->client_lock);
6327 nfsd_recall_delegations(&reaplist);
6328 return count;
6329 }
6330 #endif /* CONFIG_NFSD_FAULT_INJECTION */
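
/*
 * Hedged usage note (editor's addition): the nfsd_inject_* helpers above
 * back nfsd's debugfs fault-injection files (conventionally under
 * /sys/kernel/debug/nfsd/); reading a file such as "forget_clients" is
 * expected to print state via the *_print_* variants, while writing a
 * number N expires up to N objects via the *_forget_*(N) variants.
 */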
6331
6332 /*
6333 * Since the lifetime of a delegation isn't limited to that of an open, a
6334 * client may quite reasonably hang on to a delegation as long as it has
6335 * the inode cached. This becomes an obvious problem the first time a
6336 * client's inode cache approaches the size of the server's total memory.
6337 *
6338 * For now we avoid this problem by imposing a hard limit on the number
6339 * of delegations, which varies according to the server's memory size.
6340 */
6341 static void
6342 set_max_delegations(void)
6343 {
6344 /*
6345 * Allow at most 4 delegations per megabyte of RAM. Quick
6346 * estimates suggest that in the worst case (where every delegation
6347 * is for a different inode), a delegation could take about 1.5K,
6348 	 * giving a worst case usage of about 6K per MB, i.e. roughly 0.6% of memory.
6349 */
6350 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
6351 }
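
/*
 * Worked example for the shift above (editor's addition; assumes 4K pages,
 * i.e. PAGE_SHIFT == 12, and a hypothetical 1GB of free buffer pages):
 *
 *	nr_free_buffer_pages()	== 262144
 *	20 - 2 - PAGE_SHIFT	== 6
 *	max_delegations		== 262144 >> 6 == 4096
 *
 * i.e. 4 delegations per megabyte; at ~1.5K each that is ~6MB of state,
 * or roughly 0.6% of the 1GB.
 */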
6352
6353 static int nfs4_state_create_net(struct net *net)
6354 {
6355 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6356 int i;
6357
6358 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6359 CLIENT_HASH_SIZE, GFP_KERNEL);
6360 if (!nn->conf_id_hashtbl)
6361 goto err;
6362 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6363 CLIENT_HASH_SIZE, GFP_KERNEL);
6364 if (!nn->unconf_id_hashtbl)
6365 goto err_unconf_id;
6366 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
6367 SESSION_HASH_SIZE, GFP_KERNEL);
6368 if (!nn->sessionid_hashtbl)
6369 goto err_sessionid;
6370
6371 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6372 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
6373 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
6374 }
6375 for (i = 0; i < SESSION_HASH_SIZE; i++)
6376 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
6377 nn->conf_name_tree = RB_ROOT;
6378 nn->unconf_name_tree = RB_ROOT;
6379 INIT_LIST_HEAD(&nn->client_lru);
6380 INIT_LIST_HEAD(&nn->close_lru);
6381 INIT_LIST_HEAD(&nn->del_recall_lru);
6382 spin_lock_init(&nn->client_lock);
6383
6384 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6385 get_net(net);
6386
6387 return 0;
6388
6389 err_sessionid:
6390 kfree(nn->unconf_id_hashtbl);
6391 err_unconf_id:
6392 kfree(nn->conf_id_hashtbl);
6393 err:
6394 return -ENOMEM;
6395 }
6396
6397 static void
6398 nfs4_state_destroy_net(struct net *net)
6399 {
6400 int i;
6401 struct nfs4_client *clp = NULL;
6402 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6403
6404 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6405 while (!list_empty(&nn->conf_id_hashtbl[i])) {
6406 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6407 destroy_client(clp);
6408 }
6409 }
6410
6411 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6412 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
6413 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6414 destroy_client(clp);
6415 }
6416 }
6417
6418 kfree(nn->sessionid_hashtbl);
6419 kfree(nn->unconf_id_hashtbl);
6420 kfree(nn->conf_id_hashtbl);
6421 put_net(net);
6422 }
6423
6424 int
6425 nfs4_state_start_net(struct net *net)
6426 {
6427 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6428 int ret;
6429
6430 ret = nfs4_state_create_net(net);
6431 if (ret)
6432 return ret;
6433 nn->boot_time = get_seconds();
6434 nn->grace_ended = false;
6435 locks_start_grace(net, &nn->nfsd4_manager);
6436 nfsd4_client_tracking_init(net);
6437 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
6438 nn->nfsd4_grace, net);
6439 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
6440 return 0;
6441 }
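
/*
 * Editor's example: with nfsd's default 90-second lease (and therefore a
 * 90-second grace period), the printk above reports a 90-second grace
 * period and the laundromat is first queued 90 * HZ jiffies later.
 */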
6442
6443 /* initialization to perform when the nfsd service is started: */
6444
6445 int
6446 nfs4_state_start(void)
6447 {
6448 int ret;
6449
6450 ret = set_callback_cred();
6451 if (ret)
6452 return -ENOMEM;
6453 laundry_wq = create_singlethread_workqueue("nfsd4");
6454 if (laundry_wq == NULL) {
6455 ret = -ENOMEM;
6456 goto out_recovery;
6457 }
6458 ret = nfsd4_create_callback_queue();
6459 if (ret)
6460 goto out_free_laundry;
6461
6462 set_max_delegations();
6463
6464 return 0;
6465
6466 out_free_laundry:
6467 destroy_workqueue(laundry_wq);
6468 out_recovery:
6469 return ret;
6470 }
6471
6472 void
6473 nfs4_state_shutdown_net(struct net *net)
6474 {
6475 struct nfs4_delegation *dp = NULL;
6476 struct list_head *pos, *next, reaplist;
6477 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6478
6479 cancel_delayed_work_sync(&nn->laundromat_work);
6480 locks_end_grace(&nn->nfsd4_manager);
6481
6482 INIT_LIST_HEAD(&reaplist);
6483 spin_lock(&state_lock);
6484 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6485 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6486 unhash_delegation_locked(dp);
6487 list_add(&dp->dl_recall_lru, &reaplist);
6488 }
6489 spin_unlock(&state_lock);
6490 list_for_each_safe(pos, next, &reaplist) {
6491 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6492 list_del_init(&dp->dl_recall_lru);
6493 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6494 nfs4_put_stid(&dp->dl_stid);
6495 }
6496
6497 nfsd4_client_tracking_exit(net);
6498 nfs4_state_destroy_net(net);
6499 }
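
/*
 * Editor's note: the teardown above is two-phase on purpose. Delegations
 * are unhashed and moved to a private reaplist while state_lock is held,
 * and only after the lock is dropped are the lease and stateid references
 * put, since those puts can take other locks and do real work.
 */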
6500
6501 void
6502 nfs4_state_shutdown(void)
6503 {
6504 destroy_workqueue(laundry_wq);
6505 nfsd4_destroy_callback_queue();
6506 }
6507
6508 static void
6509 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6510 {
6511 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
6512 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
6513 }
6514
6515 static void
6516 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6517 {
6518 if (cstate->minorversion) {
6519 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
6520 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6521 }
6522 }
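
/*
 * Illustration (editor's addition): in a v4.1 COMPOUND such as OPEN; READ,
 * the OPEN result path saves op_stateid as cstate->current_stateid via
 * put_stateid(); if the READ then carries the reserved "current stateid"
 * special value, get_stateid() replaces it with the saved one before the
 * stateid is actually looked up.
 */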
6523
6524 void
6525 clear_current_stateid(struct nfsd4_compound_state *cstate)
6526 {
6527 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6528 }
6529
6530 /*
6531 * functions to set current state id
6532 */
6533 void
6534 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6535 {
6536 put_stateid(cstate, &odp->od_stateid);
6537 }
6538
6539 void
6540 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
6541 {
6542 put_stateid(cstate, &open->op_stateid);
6543 }
6544
6545 void
6546 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6547 {
6548 put_stateid(cstate, &close->cl_stateid);
6549 }
6550
6551 void
6552 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
6553 {
6554 put_stateid(cstate, &lock->lk_resp_stateid);
6555 }
6556
6557 /*
6558 * functions to consume current state id
6559 */
6560
6561 void
6562 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
6563 {
6564 get_stateid(cstate, &odp->od_stateid);
6565 }
6566
6567 void
6568 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
6569 {
6570 get_stateid(cstate, &drp->dr_stateid);
6571 }
6572
6573 void
6574 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
6575 {
6576 get_stateid(cstate, &fsp->fr_stateid);
6577 }
6578
6579 void
6580 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
6581 {
6582 get_stateid(cstate, &setattr->sa_stateid);
6583 }
6584
6585 void
6586 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
6587 {
6588 get_stateid(cstate, &close->cl_stateid);
6589 }
6590
6591 void
6592 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
6593 {
6594 get_stateid(cstate, &locku->lu_stateid);
6595 }
6596
6597 void
6598 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
6599 {
6600 get_stateid(cstate, &read->rd_stateid);
6601 }
6602
6603 void
6604 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
6605 {
6606 get_stateid(cstate, &write->wr_stateid);
6607 }