/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table. In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		posix_unblock_lock(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS		8
#define FILE_HASH_SIZE		(1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1. But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected. Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
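
/*
 * A minimal sketch of what idr_alloc_cyclic() buys us above (the
 * numbers are illustrative only): if the previous stateid got id 41,
 * the next allocation returns 42 even if lower ids have been freed in
 * the meantime; the counter only wraps back to low values after
 * passing INT_MAX, maximizing the time before an id is reused.
 */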

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits. We hash the filehandle to 32 bits and use
 * each of the low 3 bytes as a bit index into the filter.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access. Testing does not need the lock
 * except when swapping the two filters.
 */
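
/*
 * For illustration (the hash value here is made up): a filehandle that
 * jhash()es to 0x00123456 sets bits 0x56, 0x34 and 0x12 of the "new"
 * filter in block_delegations(), and delegation_blocked() reports a
 * match only when all three of those bits are set in the same filter.
 */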
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1 - bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented. The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}
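
/*
 * Note on the wrap above: si_generation is bumped from 0xffffffff back
 * to 1, never 0, since NFSv4.1 gives a seqid of zero in a stateid the
 * special meaning "match any generation" (and the special zero stateid
 * above already uses generation 0).
 */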

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                for this nfs4_file.
 *
 */

static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return -EAGAIN;
		}
	}
	return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	int status;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	status = nfs4_get_existing_delegation(clp, fp);
	if (status)
		return status;
	++fp->fi_delegees;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used. This allows us
 * to enforce the recommendation of RFC 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
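
/*
 * Example, using the NFS4_SHARE_ACCESS_* values (READ = 1, WRITE = 2,
 * BOTH = 3): after one open with access READ and a later open with
 * access BOTH, st_access_bmap has bits 1 and 3 set, and
 * bmap_to_share_mode() below folds that back into READ|BOTH == BOTH.
 */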
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
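
/*
 * For example, an OPEN with share_access NFS4_SHARE_ACCESS_BOTH
 * (READ|WRITE) maps to O_RDWR above, which is why struct nfs4_file
 * keeps its fi_fds[] and fi_access[] arrays indexed by open mode.
 */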

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time. Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes. In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
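
/*
 * A worked example (the client's value here is arbitrary): with the
 * 80-byte minimum header above, a ca_maxresponsesize_cached of 1024
 * commits us to caching at most 1024 - 80 = 944 bytes of post-SEQUENCE
 * reply data per slot, which is what slot_bytes() below sizes a slot
 * for.
 */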

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	/*
	 * Never use more than a third of the remaining memory,
	 * unless it's the only way to give this client a slot:
	 */
	avail = clamp_t(int, avail, slotsize, avail/3);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
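
/*
 * A sketch of the arithmetic above, with made-up numbers: if the min()
 * leaves avail at 96k, the clamp trims that to a third (32k), so with
 * 2048-byte slots the session gets at most 16 slots no matter how many
 * the client requested in ca_maxrequests.
 */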

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address. Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one. That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

/*
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				 se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
	       sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
					   GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}
1989
1990 /*
1991 * RFC 3530 language requires clid_inuse be returned when the
1992 * "principal" associated with a requests differs from that previously
1993 * used. We use uid, gid's, and gss principal string as our best
1994 * approximation. We also don't want to allow non-gss use of a client
1995 * established using gss: in theory cr_principal should catch that
1996 * change, but in practice cr_principal can be null even in the gss case
1997 * since gssd doesn't always pass down a principal string.
1998 */
1999 static bool is_gss_cred(struct svc_cred *cr)
2000 {
2001 /* Is cr_flavor one of the gss "pseudoflavors"? */
2002 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2003 }
2004
2005
2006 static bool
2007 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2008 {
2009 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2010 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2011 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2012 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2013 return false;
2014 if (cr1->cr_principal == cr2->cr_principal)
2015 return true;
2016 if (!cr1->cr_principal || !cr2->cr_principal)
2017 return false;
2018 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2019 }
2020
2021 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2022 {
2023 struct svc_cred *cr = &rqstp->rq_cred;
2024 u32 service;
2025
2026 if (!cr->cr_gss_mech)
2027 return false;
2028 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2029 return service == RPC_GSS_SVC_INTEGRITY ||
2030 service == RPC_GSS_SVC_PRIVACY;
2031 }
2032
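/*
 * For a client that negotiated SP4_MACH_CRED, verify that this request
 * arrived integrity-protected over the same GSS mechanism, and from
 * the same principal, that the client was established with.
 */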
2033 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2034 {
2035 struct svc_cred *cr = &rqstp->rq_cred;
2036
2037 if (!cl->cl_mach_cred)
2038 return true;
2039 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2040 return false;
2041 if (!svc_rqst_integrity_protected(rqstp))
2042 return false;
2043 if (cl->cl_cred.cr_raw_principal)
2044 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2045 cr->cr_raw_principal);
2046 if (!cr->cr_principal)
2047 return false;
2048 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2049 }
2050
2051 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2052 {
2053 __be32 verf[2];
2054
2055 /*
2056 * This is opaque to the client, so there is no need to byte-swap. Use
2057 * __force to keep sparse happy
2058 */
2059 verf[0] = (__force __be32)get_seconds();
2060 verf[1] = (__force __be32)nn->clverifier_counter++;
2061 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2062 }
2063
2064 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2065 {
2066 clp->cl_clientid.cl_boot = nn->boot_time;
2067 clp->cl_clientid.cl_id = nn->clientid_counter++;
2068 gen_confirm(clp, nn);
2069 }
2070
2071 static struct nfs4_stid *
2072 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2073 {
2074 struct nfs4_stid *ret;
2075
2076 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2077 if (!ret || !ret->sc_type)
2078 return NULL;
2079 return ret;
2080 }
2081
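/*
 * Look up a stateid whose type matches the given mask, taking a
 * reference that the caller must drop with nfs4_put_stid().
 */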
2082 static struct nfs4_stid *
2083 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2084 {
2085 struct nfs4_stid *s;
2086
2087 spin_lock(&cl->cl_lock);
2088 s = find_stateid_locked(cl, t);
2089 if (s != NULL) {
2090 if (typemask & s->sc_type)
2091 refcount_inc(&s->sc_count);
2092 else
2093 s = NULL;
2094 }
2095 spin_unlock(&cl->cl_lock);
2096 return s;
2097 }
2098
2099 static struct nfs4_client *create_client(struct xdr_netobj name,
2100 struct svc_rqst *rqstp, nfs4_verifier *verf)
2101 {
2102 struct nfs4_client *clp;
2103 struct sockaddr *sa = svc_addr(rqstp);
2104 int ret;
2105 struct net *net = SVC_NET(rqstp);
2106
2107 clp = alloc_client(name);
2108 if (clp == NULL)
2109 return NULL;
2110
2111 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2112 if (ret) {
2113 free_client(clp);
2114 return NULL;
2115 }
2116 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2117 clp->cl_time = get_seconds();
2118 clear_bit(0, &clp->cl_cb_slot_busy);
2119 copy_verf(clp, verf);
2120 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2121 clp->cl_cb_session = NULL;
2122 clp->net = net;
2123 return clp;
2124 }
2125
2126 static void
2127 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2128 {
2129 struct rb_node **new = &(root->rb_node), *parent = NULL;
2130 struct nfs4_client *clp;
2131
2132 while (*new) {
2133 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2134 parent = *new;
2135
2136 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2137 new = &((*new)->rb_left);
2138 else
2139 new = &((*new)->rb_right);
2140 }
2141
2142 rb_link_node(&new_clp->cl_namenode, parent, new);
2143 rb_insert_color(&new_clp->cl_namenode, root);
2144 }
2145
2146 static struct nfs4_client *
2147 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2148 {
2149 int cmp;
2150 struct rb_node *node = root->rb_node;
2151 struct nfs4_client *clp;
2152
2153 while (node) {
2154 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2155 cmp = compare_blob(&clp->cl_name, name);
2156 if (cmp > 0)
2157 node = node->rb_left;
2158 else if (cmp < 0)
2159 node = node->rb_right;
2160 else
2161 return clp;
2162 }
2163 return NULL;
2164 }
2165
2166 static void
2167 add_to_unconfirmed(struct nfs4_client *clp)
2168 {
2169 unsigned int idhashval;
2170 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2171
2172 lockdep_assert_held(&nn->client_lock);
2173
2174 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2175 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2176 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2177 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2178 renew_client_locked(clp);
2179 }
2180
2181 static void
2182 move_to_confirmed(struct nfs4_client *clp)
2183 {
2184 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2185 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2186
2187 lockdep_assert_held(&nn->client_lock);
2188
2189 dprintk("NFSD: move_to_confirmed nfs4_client %p\n", clp);
2190 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2191 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2192 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2193 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2194 renew_client_locked(clp);
2195 }
2196
2197 static struct nfs4_client *
2198 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2199 {
2200 struct nfs4_client *clp;
2201 unsigned int idhashval = clientid_hashval(clid->cl_id);
2202
2203 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2204 if (same_clid(&clp->cl_clientid, clid)) {
2205 if ((bool)clp->cl_minorversion != sessions)
2206 return NULL;
2207 renew_client_locked(clp);
2208 return clp;
2209 }
2210 }
2211 return NULL;
2212 }
2213
2214 static struct nfs4_client *
2215 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2216 {
2217 struct list_head *tbl = nn->conf_id_hashtbl;
2218
2219 lockdep_assert_held(&nn->client_lock);
2220 return find_client_in_id_table(tbl, clid, sessions);
2221 }
2222
2223 static struct nfs4_client *
2224 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2225 {
2226 struct list_head *tbl = nn->unconf_id_hashtbl;
2227
2228 lockdep_assert_held(&nn->client_lock);
2229 return find_client_in_id_table(tbl, clid, sessions);
2230 }
2231
2232 static bool clp_used_exchangeid(struct nfs4_client *clp)
2233 {
2234 return clp->cl_exchange_flags != 0;
2235 }
2236
2237 static struct nfs4_client *
2238 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2239 {
2240 lockdep_assert_held(&nn->client_lock);
2241 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2242 }
2243
2244 static struct nfs4_client *
2245 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2246 {
2247 lockdep_assert_held(&nn->client_lock);
2248 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2249 }
2250
2251 static void
2252 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2253 {
2254 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2255 struct sockaddr *sa = svc_addr(rqstp);
2256 u32 scopeid = rpc_get_scope_id(sa);
2257 unsigned short expected_family;
2258
2259 /* Currently, we only support tcp and tcp6 for the callback channel */
2260 if (se->se_callback_netid_len == 3 &&
2261 !memcmp(se->se_callback_netid_val, "tcp", 3))
2262 expected_family = AF_INET;
2263 else if (se->se_callback_netid_len == 4 &&
2264 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2265 expected_family = AF_INET6;
2266 else
2267 goto out_err;
2268
2269 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2270 se->se_callback_addr_len,
2271 (struct sockaddr *)&conn->cb_addr,
2272 sizeof(conn->cb_addr));
2273
2274 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2275 goto out_err;
2276
2277 if (conn->cb_addr.ss_family == AF_INET6)
2278 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2279
2280 conn->cb_prog = se->se_callback_prog;
2281 conn->cb_ident = se->se_callback_ident;
2282 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2283 return;
2284 out_err:
2285 conn->cb_addr.ss_family = AF_UNSPEC;
2286 conn->cb_addrlen = 0;
2287 dprintk("NFSD: this client (clientid %08x/%08x) "
2288 "will not receive delegations\n",
2289 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2290
2291 return;
2292 }
2293
2294 /*
2295 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2296 */
2297 static void
2298 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2299 {
2300 struct xdr_buf *buf = resp->xdr.buf;
2301 struct nfsd4_slot *slot = resp->cstate.slot;
2302 unsigned int base;
2303
2304 dprintk("--> %s slot %p\n", __func__, slot);
2305
2306 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2307 slot->sl_opcnt = resp->opcnt;
2308 slot->sl_status = resp->cstate.status;
2309 free_svc_cred(&slot->sl_cred);
2310 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2311
2312 if (!nfsd4_cache_this(resp)) {
2313 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2314 return;
2315 }
2316 slot->sl_flags |= NFSD4_SLOT_CACHED;
2317
2318 base = resp->cstate.data_offset;
2319 slot->sl_datalen = buf->len - base;
2320 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2321 WARN(1, "%s: sessions DRC could not cache compound\n",
2322 __func__);
2323 return;
2324 }
2325
2326 /*
2327 * Encode the replay sequence operation from the slot values.
2328 * If cachethis is FALSE, encode the uncached-reply error on the next
2329 * operation, which sets resp->p and increments resp->opcnt for
2330 * nfs4svc_encode_compoundres.
2331 *
2332 */
2333 static __be32
2334 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2335 struct nfsd4_compoundres *resp)
2336 {
2337 struct nfsd4_op *op;
2338 struct nfsd4_slot *slot = resp->cstate.slot;
2339
2340 /* Encode the replayed sequence operation */
2341 op = &args->ops[resp->opcnt - 1];
2342 nfsd4_encode_operation(resp, op);
2343
2344 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2345 return op->status;
2346 if (args->opcnt == 1) {
2347 /*
2348 * The original operation wasn't a solo sequence--we
2349 * always cache those--so this retry must not match the
2350 * original:
2351 */
2352 op->status = nfserr_seq_false_retry;
2353 } else {
2354 op = &args->ops[resp->opcnt++];
2355 op->status = nfserr_retry_uncached_rep;
2356 nfsd4_encode_operation(resp, op);
2357 }
2358 return op->status;
2359 }
2360
2361 /*
2362 * The sequence operation is not cached because we can use the slot and
2363 * session values.
2364 */
2365 static __be32
2366 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2367 struct nfsd4_sequence *seq)
2368 {
2369 struct nfsd4_slot *slot = resp->cstate.slot;
2370 struct xdr_stream *xdr = &resp->xdr;
2371 __be32 *p;
2372 __be32 status;
2373
2374 dprintk("--> %s slot %p\n", __func__, slot);
2375
2376 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2377 if (status)
2378 return status;
2379
2380 p = xdr_reserve_space(xdr, slot->sl_datalen);
2381 if (!p) {
2382 WARN_ON_ONCE(1);
2383 return nfserr_serverfault;
2384 }
2385 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2386 xdr_commit_encode(xdr);
2387
2388 resp->opcnt = slot->sl_opcnt;
2389 return slot->sl_status;
2390 }
2391
2392 /*
2393 * Set the exchange_id flags returned by the server.
2394 */
2395 static void
2396 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2397 {
2398 #ifdef CONFIG_NFSD_PNFS
2399 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2400 #else
2401 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2402 #endif
2403
2404 /* Referrals are supported, Migration is not. */
2405 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2406
2407 /* set the wire flags to return to client. */
2408 clid->flags = new->cl_exchange_flags;
2409 }
2410
2411 static bool client_has_openowners(struct nfs4_client *clp)
2412 {
2413 struct nfs4_openowner *oo;
2414
2415 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2416 if (!list_empty(&oo->oo_owner.so_stateids))
2417 return true;
2418 }
2419 return false;
2420 }
2421
2422 static bool client_has_state(struct nfs4_client *clp)
2423 {
2424 return client_has_openowners(clp)
2425 #ifdef CONFIG_NFSD_PNFS
2426 || !list_empty(&clp->cl_lo_states)
2427 #endif
2428 || !list_empty(&clp->cl_delegations)
2429 || !list_empty(&clp->cl_sessions);
2430 }
2431
2432 __be32
2433 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2434 union nfsd4_op_u *u)
2435 {
2436 struct nfsd4_exchange_id *exid = &u->exchange_id;
2437 struct nfs4_client *conf, *new;
2438 struct nfs4_client *unconf = NULL;
2439 __be32 status;
2440 char addr_str[INET6_ADDRSTRLEN];
2441 nfs4_verifier verf = exid->verifier;
2442 struct sockaddr *sa = svc_addr(rqstp);
2443 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2444 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2445
2446 rpc_ntop(sa, addr_str, sizeof(addr_str));
2447 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2448 "ip_addr=%s flags %x, spa_how %d\n",
2449 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
2450 addr_str, exid->flags, exid->spa_how);
2451
2452 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2453 return nfserr_inval;
2454
2455 new = create_client(exid->clname, rqstp, &verf);
2456 if (new == NULL)
2457 return nfserr_jukebox;
2458
2459 switch (exid->spa_how) {
2460 case SP4_MACH_CRED:
2461 exid->spo_must_enforce[0] = 0;
2462 exid->spo_must_enforce[1] = (
2463 1 << (OP_BIND_CONN_TO_SESSION - 32) |
2464 1 << (OP_EXCHANGE_ID - 32) |
2465 1 << (OP_CREATE_SESSION - 32) |
2466 1 << (OP_DESTROY_SESSION - 32) |
2467 1 << (OP_DESTROY_CLIENTID - 32));
2468
2469 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2470 1 << (OP_OPEN_DOWNGRADE) |
2471 1 << (OP_LOCKU) |
2472 1 << (OP_DELEGRETURN));
2473
2474 exid->spo_must_allow[1] &= (
2475 1 << (OP_TEST_STATEID - 32) |
2476 1 << (OP_FREE_STATEID - 32));
2477 if (!svc_rqst_integrity_protected(rqstp)) {
2478 status = nfserr_inval;
2479 goto out_nolock;
2480 }
2481 /*
2482 * Sometimes userspace doesn't give us a principal.
2483 * Which is a bug, really. Anyway, we can't enforce
2484 * MACH_CRED in that case, better to give up now:
2485 */
2486 if (!new->cl_cred.cr_principal &&
2487 !new->cl_cred.cr_raw_principal) {
2488 status = nfserr_serverfault;
2489 goto out_nolock;
2490 }
2491 new->cl_mach_cred = true;
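/* fall through */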
2492 case SP4_NONE:
2493 break;
2494 default: /* checked by xdr code */
2495 WARN_ON_ONCE(1);
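/* fall through */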
2496 case SP4_SSV:
2497 status = nfserr_encr_alg_unsupp;
2498 goto out_nolock;
2499 }
2500
2501 /* Cases below refer to rfc 5661 section 18.35.4: */
2502 spin_lock(&nn->client_lock);
2503 conf = find_confirmed_client_by_name(&exid->clname, nn);
2504 if (conf) {
2505 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2506 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2507
2508 if (update) {
2509 if (!clp_used_exchangeid(conf)) { /* buggy client */
2510 status = nfserr_inval;
2511 goto out;
2512 }
2513 if (!nfsd4_mach_creds_match(conf, rqstp)) {
2514 status = nfserr_wrong_cred;
2515 goto out;
2516 }
2517 if (!creds_match) { /* case 9 */
2518 status = nfserr_perm;
2519 goto out;
2520 }
2521 if (!verfs_match) { /* case 8 */
2522 status = nfserr_not_same;
2523 goto out;
2524 }
2525 /* case 6 */
2526 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2527 goto out_copy;
2528 }
2529 if (!creds_match) { /* case 3 */
2530 if (client_has_state(conf)) {
2531 status = nfserr_clid_inuse;
2532 goto out;
2533 }
2534 goto out_new;
2535 }
2536 if (verfs_match) { /* case 2 */
2537 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2538 goto out_copy;
2539 }
2540 /* case 5, client reboot */
2541 conf = NULL;
2542 goto out_new;
2543 }
2544
2545 if (update) { /* case 7 */
2546 status = nfserr_noent;
2547 goto out;
2548 }
2549
2550 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
2551 if (unconf) /* case 4, possible retry or client restart */
2552 unhash_client_locked(unconf);
2553
2554 /* case 1 (normal case) */
2555 out_new:
2556 if (conf) {
2557 status = mark_client_expired_locked(conf);
2558 if (status)
2559 goto out;
2560 }
2561 new->cl_minorversion = cstate->minorversion;
2562 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2563 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2564
2565 gen_clid(new, nn);
2566 add_to_unconfirmed(new);
2567 swap(new, conf);
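/*
 * Swap so that out_copy replies using the new client via 'conf', while
 * any expired confirmed client, now in 'new', is destroyed by
 * expire_client() at out_nolock.
 */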
2568 out_copy:
2569 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2570 exid->clientid.cl_id = conf->cl_clientid.cl_id;
2571
2572 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2573 nfsd4_set_ex_flags(conf, exid);
2574
2575 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2576 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2577 status = nfs_ok;
2578
2579 out:
2580 spin_unlock(&nn->client_lock);
2581 out_nolock:
2582 if (new)
2583 expire_client(new);
2584 if (unconf)
2585 expire_client(unconf);
2586 return status;
2587 }
2588
2589 static __be32
2590 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2591 {
2592 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2593 slot_seqid);
2594
2595 /* The slot is in use, and no response has been sent. */
2596 if (slot_inuse) {
2597 if (seqid == slot_seqid)
2598 return nfserr_jukebox;
2599 else
2600 return nfserr_seq_misordered;
2601 }
2602 /* Note unsigned 32-bit arithmetic handles wraparound: */
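/* e.g. a slot_seqid of 0xffffffff is correctly followed by a seqid of 0 */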
2603 if (likely(seqid == slot_seqid + 1))
2604 return nfs_ok;
2605 if (seqid == slot_seqid)
2606 return nfserr_replay_cache;
2607 return nfserr_seq_misordered;
2608 }
2609
2610 /*
2611 * Cache the CREATE_SESSION result in the client's single DRC slot for
2612 * that operation by saving the result structure. sl_seqid has already
2613 * been set. Do this for both solo and embedded CREATE_SESSION operations.
2614 */
2615 static void
2616 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2617 struct nfsd4_clid_slot *slot, __be32 nfserr)
2618 {
2619 slot->sl_status = nfserr;
2620 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2621 }
2622
2623 static __be32
2624 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2625 struct nfsd4_clid_slot *slot)
2626 {
2627 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2628 return slot->sl_status;
2629 }
2630
2631 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2632 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2633 1 + /* MIN tag is length with zero, only length */ \
2634 3 + /* version, opcount, opcode */ \
2635 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2636 /* seqid, slotID, slotID, cache */ \
2637 4 ) * sizeof(__be32))
2638
2639 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2640 2 + /* verifier: AUTH_NULL, length 0 */\
2641 1 + /* status */ \
2642 1 + /* MIN tag is length with zero, only length */ \
2643 3 + /* opcount, opcode, opstatus*/ \
2644 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2645 /* seqid, slotID, slotID, slotID, status */ \
2646 5 ) * sizeof(__be32))
2647
2648 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2649 {
2650 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2651
2652 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2653 return nfserr_toosmall;
2654 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2655 return nfserr_toosmall;
2656 ca->headerpadsz = 0;
2657 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2658 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2659 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2660 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2661 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2662 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2663 /*
2664 * Note that decreasing the slot size below the client's request may
2665 * make it difficult for the client to function correctly, whereas
2666 * decreasing the number of slots will (just?) affect
2667 * performance. When short on memory we therefore prefer to
2668 * decrease the number of slots instead of their size. Clients that
2669 * request larger slots than they need will get poor results:
2670 */
2671 ca->maxreqs = nfsd4_get_drc_mem(ca);
2672 if (!ca->maxreqs)
2673 return nfserr_jukebox;
2674
2675 return nfs_ok;
2676 }
2677
2678 /*
2679 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2680 * These are based on similar macros in linux/sunrpc/msg_prot.h .
2681 */
2682 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
2683 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2684
2685 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2686 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2687
2688 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2689 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2690 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2691 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2692 sizeof(__be32))
2693
2694 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2695 {
2696 ca->headerpadsz = 0;
2697
2698 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2699 return nfserr_toosmall;
2700 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2701 return nfserr_toosmall;
2702 ca->maxresp_cached = 0;
2703 if (ca->maxops < 2)
2704 return nfserr_toosmall;
2705
2706 return nfs_ok;
2707 }
2708
2709 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2710 {
2711 switch (cbs->flavor) {
2712 case RPC_AUTH_NULL:
2713 case RPC_AUTH_UNIX:
2714 return nfs_ok;
2715 default:
2716 /*
2717 * GSS case: the spec doesn't allow us to return this
2718 * error. But it also doesn't allow us not to support
2719 * GSS.
2720 * I'd rather this fail hard than return some error the
2721 * client might think it can already handle:
2722 */
2723 return nfserr_encr_alg_unsupp;
2724 }
2725 }
2726
2727 __be32
2728 nfsd4_create_session(struct svc_rqst *rqstp,
2729 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
2730 {
2731 struct nfsd4_create_session *cr_ses = &u->create_session;
2732 struct sockaddr *sa = svc_addr(rqstp);
2733 struct nfs4_client *conf, *unconf;
2734 struct nfs4_client *old = NULL;
2735 struct nfsd4_session *new;
2736 struct nfsd4_conn *conn;
2737 struct nfsd4_clid_slot *cs_slot = NULL;
2738 __be32 status = 0;
2739 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2740
2741 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2742 return nfserr_inval;
2743 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2744 if (status)
2745 return status;
2746 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2747 if (status)
2748 return status;
2749 status = check_backchannel_attrs(&cr_ses->back_channel);
2750 if (status)
2751 goto out_release_drc_mem;
2752 status = nfserr_jukebox;
2753 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2754 if (!new)
2755 goto out_release_drc_mem;
2756 conn = alloc_conn_from_crses(rqstp, cr_ses);
2757 if (!conn)
2758 goto out_free_session;
2759
2760 spin_lock(&nn->client_lock);
2761 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2762 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2763 WARN_ON_ONCE(conf && unconf);
2764
2765 if (conf) {
2766 status = nfserr_wrong_cred;
2767 if (!nfsd4_mach_creds_match(conf, rqstp))
2768 goto out_free_conn;
2769 cs_slot = &conf->cl_cs_slot;
2770 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2771 if (status) {
2772 if (status == nfserr_replay_cache)
2773 status = nfsd4_replay_create_session(cr_ses, cs_slot);
2774 goto out_free_conn;
2775 }
2776 } else if (unconf) {
2777 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2778 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2779 status = nfserr_clid_inuse;
2780 goto out_free_conn;
2781 }
2782 status = nfserr_wrong_cred;
2783 if (!nfsd4_mach_creds_match(unconf, rqstp))
2784 goto out_free_conn;
2785 cs_slot = &unconf->cl_cs_slot;
2786 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2787 if (status) {
2788 /* an unconfirmed replay returns misordered */
2789 status = nfserr_seq_misordered;
2790 goto out_free_conn;
2791 }
2792 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2793 if (old) {
2794 status = mark_client_expired_locked(old);
2795 if (status) {
2796 old = NULL;
2797 goto out_free_conn;
2798 }
2799 }
2800 move_to_confirmed(unconf);
2801 conf = unconf;
2802 } else {
2803 status = nfserr_stale_clientid;
2804 goto out_free_conn;
2805 }
2806 status = nfs_ok;
2807 /* Persistent sessions are not supported */
2808 cr_ses->flags &= ~SESSION4_PERSIST;
2809 /* Upshifting from TCP to RDMA is not supported */
2810 cr_ses->flags &= ~SESSION4_RDMA;
2811
2812 init_session(rqstp, new, conf, cr_ses);
2813 nfsd4_get_session_locked(new);
2814
2815 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2816 NFS4_MAX_SESSIONID_LEN);
2817 cs_slot->sl_seqid++;
2818 cr_ses->seqid = cs_slot->sl_seqid;
2819
2820 /* cache solo and embedded create sessions under the client_lock */
2821 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2822 spin_unlock(&nn->client_lock);
2823 /* init connection and backchannel */
2824 nfsd4_init_conn(rqstp, conn, new);
2825 nfsd4_put_session(new);
2826 if (old)
2827 expire_client(old);
2828 return status;
2829 out_free_conn:
2830 spin_unlock(&nn->client_lock);
2831 free_conn(conn);
2832 if (old)
2833 expire_client(old);
2834 out_free_session:
2835 __free_session(new);
2836 out_release_drc_mem:
2837 nfsd4_put_drc_mem(&cr_ses->fore_channel);
2838 return status;
2839 }
2840
2841 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2842 {
2843 switch (*dir) {
2844 case NFS4_CDFC4_FORE:
2845 case NFS4_CDFC4_BACK:
2846 return nfs_ok;
2847 case NFS4_CDFC4_FORE_OR_BOTH:
2848 case NFS4_CDFC4_BACK_OR_BOTH:
2849 *dir = NFS4_CDFC4_BOTH;
2850 return nfs_ok;
2851 }
2852 return nfserr_inval;
2853 }
2854
2855 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2856 struct nfsd4_compound_state *cstate,
2857 union nfsd4_op_u *u)
2858 {
2859 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
2860 struct nfsd4_session *session = cstate->session;
2861 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2862 __be32 status;
2863
2864 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2865 if (status)
2866 return status;
2867 spin_lock(&nn->client_lock);
2868 session->se_cb_prog = bc->bc_cb_program;
2869 session->se_cb_sec = bc->bc_cb_sec;
2870 spin_unlock(&nn->client_lock);
2871
2872 nfsd4_probe_callback(session->se_client);
2873
2874 return nfs_ok;
2875 }
2876
2877 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2878 struct nfsd4_compound_state *cstate,
2879 union nfsd4_op_u *u)
2880 {
2881 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
2882 __be32 status;
2883 struct nfsd4_conn *conn;
2884 struct nfsd4_session *session;
2885 struct net *net = SVC_NET(rqstp);
2886 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2887
2888 if (!nfsd4_last_compound_op(rqstp))
2889 return nfserr_not_only_op;
2890 spin_lock(&nn->client_lock);
2891 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2892 spin_unlock(&nn->client_lock);
2893 if (!session)
2894 goto out_no_session;
2895 status = nfserr_wrong_cred;
2896 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2897 goto out;
2898 status = nfsd4_map_bcts_dir(&bcts->dir);
2899 if (status)
2900 goto out;
2901 conn = alloc_conn(rqstp, bcts->dir);
2902 status = nfserr_jukebox;
2903 if (!conn)
2904 goto out;
2905 nfsd4_init_conn(rqstp, conn, session);
2906 status = nfs_ok;
2907 out:
2908 nfsd4_put_session(session);
2909 out_no_session:
2910 return status;
2911 }
2912
2913 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2914 {
2915 if (!session)
2916 return false;
2917 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2918 }
2919
2920 __be32
2921 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2922 union nfsd4_op_u *u)
2923 {
2924 struct nfsd4_destroy_session *sessionid = &u->destroy_session;
2925 struct nfsd4_session *ses;
2926 __be32 status;
2927 int ref_held_by_me = 0;
2928 struct net *net = SVC_NET(r);
2929 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2930
2931 status = nfserr_not_only_op;
2932 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2933 if (!nfsd4_last_compound_op(r))
2934 goto out;
2935 ref_held_by_me++;
2936 }
2937 dump_sessionid(__func__, &sessionid->sessionid);
2938 spin_lock(&nn->client_lock);
2939 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2940 if (!ses)
2941 goto out_client_lock;
2942 status = nfserr_wrong_cred;
2943 if (!nfsd4_mach_creds_match(ses->se_client, r))
2944 goto out_put_session;
2945 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2946 if (status)
2947 goto out_put_session;
2948 unhash_session(ses);
2949 spin_unlock(&nn->client_lock);
2950
2951 nfsd4_probe_callback_sync(ses->se_client);
2952
2953 spin_lock(&nn->client_lock);
2954 status = nfs_ok;
2955 out_put_session:
2956 nfsd4_put_session_locked(ses);
2957 out_client_lock:
2958 spin_unlock(&nn->client_lock);
2959 out:
2960 return status;
2961 }
2962
2963 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2964 {
2965 struct nfsd4_conn *c;
2966
2967 list_for_each_entry(c, &s->se_conns, cn_persession) {
2968 if (c->cn_xprt == xpt) {
2969 return c;
2970 }
2971 }
2972 return NULL;
2973 }
2974
2975 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2976 {
2977 struct nfs4_client *clp = ses->se_client;
2978 struct nfsd4_conn *c;
2979 __be32 status = nfs_ok;
2980 int ret;
2981
2982 spin_lock(&clp->cl_lock);
2983 c = __nfsd4_find_conn(new->cn_xprt, ses);
2984 if (c)
2985 goto out_free;
2986 status = nfserr_conn_not_bound_to_session;
2987 if (clp->cl_mach_cred)
2988 goto out_free;
2989 __nfsd4_hash_conn(new, ses);
2990 spin_unlock(&clp->cl_lock);
2991 ret = nfsd4_register_conn(new);
2992 if (ret)
2993 /* oops; xprt is already down: */
2994 nfsd4_conn_lost(&new->cn_xpt_user);
2995 return nfs_ok;
2996 out_free:
2997 spin_unlock(&clp->cl_lock);
2998 free_conn(new);
2999 return status;
3000 }
3001
3002 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3003 {
3004 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3005
3006 return args->opcnt > session->se_fchannel.maxops;
3007 }
3008
3009 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3010 struct nfsd4_session *session)
3011 {
3012 struct xdr_buf *xb = &rqstp->rq_arg;
3013
3014 return xb->len > session->se_fchannel.maxreq_sz;
3015 }
3016
3017 static bool replay_matches_cache(struct svc_rqst *rqstp,
3018 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3019 {
3020 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3021
3022 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3023 (bool)seq->cachethis)
3024 return false;
3025 /*
3026 * If there's an error then the reply can have fewer ops than
3027 * the call. But if we cached a reply with *more* ops than the
3028 * call you're sending us now, then this new call is clearly not
3029 * really a replay of the old one:
3030 */
3031 if (slot->sl_opcnt < argp->opcnt)
3032 return false;
3033 /* This is the only check explicitly called for by the spec: */
3034 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3035 return false;
3036 /*
3037 * There may be more comparisons we could actually do, but the
3038 * spec doesn't require us to catch every case where the calls
3039 * don't match (that would require caching the call as well as
3040 * the reply), so we don't bother.
3041 */
3042 return true;
3043 }
3044
3045 __be32
3046 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3047 union nfsd4_op_u *u)
3048 {
3049 struct nfsd4_sequence *seq = &u->sequence;
3050 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3051 struct xdr_stream *xdr = &resp->xdr;
3052 struct nfsd4_session *session;
3053 struct nfs4_client *clp;
3054 struct nfsd4_slot *slot;
3055 struct nfsd4_conn *conn;
3056 __be32 status;
3057 int buflen;
3058 struct net *net = SVC_NET(rqstp);
3059 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3060
3061 if (resp->opcnt != 1)
3062 return nfserr_sequence_pos;
3063
3064 /*
3065 * Will be either used or freed by nfsd4_sequence_check_conn
3066 * below.
3067 */
3068 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3069 if (!conn)
3070 return nfserr_jukebox;
3071
3072 spin_lock(&nn->client_lock);
3073 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3074 if (!session)
3075 goto out_no_session;
3076 clp = session->se_client;
3077
3078 status = nfserr_too_many_ops;
3079 if (nfsd4_session_too_many_ops(rqstp, session))
3080 goto out_put_session;
3081
3082 status = nfserr_req_too_big;
3083 if (nfsd4_request_too_big(rqstp, session))
3084 goto out_put_session;
3085
3086 status = nfserr_badslot;
3087 if (seq->slotid >= session->se_fchannel.maxreqs)
3088 goto out_put_session;
3089
3090 slot = session->se_slots[seq->slotid];
3091 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3092
3093 /* We do not negotiate the number of slots yet, so set the
3094 * maxslots to the session maxreqs, which is used to encode
3095 * sr_highest_slotid and sr_target_slotid to maxslots */
3096 seq->maxslots = session->se_fchannel.maxreqs;
3097
3098 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3099 slot->sl_flags & NFSD4_SLOT_INUSE);
3100 if (status == nfserr_replay_cache) {
3101 status = nfserr_seq_misordered;
3102 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3103 goto out_put_session;
3104 status = nfserr_seq_false_retry;
3105 if (!replay_matches_cache(rqstp, seq, slot))
3106 goto out_put_session;
3107 cstate->slot = slot;
3108 cstate->session = session;
3109 cstate->clp = clp;
3110 /* Return the cached reply status and set cstate->status
3111 * for nfsd4_proc_compound processing */
3112 status = nfsd4_replay_cache_entry(resp, seq);
3113 cstate->status = nfserr_replay_cache;
3114 goto out;
3115 }
3116 if (status)
3117 goto out_put_session;
3118
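/*
 * nfsd4_sequence_check_conn() either hashes the new connection into
 * the session or frees it, so clear the local pointer to keep the
 * exit path from freeing it a second time.
 */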
3119 status = nfsd4_sequence_check_conn(conn, session);
3120 conn = NULL;
3121 if (status)
3122 goto out_put_session;
3123
3124 buflen = (seq->cachethis) ?
3125 session->se_fchannel.maxresp_cached :
3126 session->se_fchannel.maxresp_sz;
3127 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3128 nfserr_rep_too_big;
3129 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3130 goto out_put_session;
3131 svc_reserve(rqstp, buflen);
3132
3133 status = nfs_ok;
3134 /* Success! bump slot seqid */
3135 slot->sl_seqid = seq->seqid;
3136 slot->sl_flags |= NFSD4_SLOT_INUSE;
3137 if (seq->cachethis)
3138 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3139 else
3140 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3141
3142 cstate->slot = slot;
3143 cstate->session = session;
3144 cstate->clp = clp;
3145
3146 out:
3147 switch (clp->cl_cb_state) {
3148 case NFSD4_CB_DOWN:
3149 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3150 break;
3151 case NFSD4_CB_FAULT:
3152 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3153 break;
3154 default:
3155 seq->status_flags = 0;
3156 }
3157 if (!list_empty(&clp->cl_revoked))
3158 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3159 out_no_session:
3160 if (conn)
3161 free_conn(conn);
3162 spin_unlock(&nn->client_lock);
3163 return status;
3164 out_put_session:
3165 nfsd4_put_session_locked(session);
3166 goto out_no_session;
3167 }
3168
3169 void
3170 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3171 {
3172 struct nfsd4_compound_state *cs = &resp->cstate;
3173
3174 if (nfsd4_has_session(cs)) {
3175 if (cs->status != nfserr_replay_cache) {
3176 nfsd4_store_cache_entry(resp);
3177 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3178 }
3179 /* Drop session reference that was taken in nfsd4_sequence() */
3180 nfsd4_put_session(cs->session);
3181 } else if (cs->clp)
3182 put_client_renew(cs->clp);
3183 }
3184
3185 __be32
3186 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3187 struct nfsd4_compound_state *cstate,
3188 union nfsd4_op_u *u)
3189 {
3190 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3191 struct nfs4_client *conf, *unconf;
3192 struct nfs4_client *clp = NULL;
3193 __be32 status = 0;
3194 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3195
3196 spin_lock(&nn->client_lock);
3197 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3198 conf = find_confirmed_client(&dc->clientid, true, nn);
3199 WARN_ON_ONCE(conf && unconf);
3200
3201 if (conf) {
3202 if (client_has_state(conf)) {
3203 status = nfserr_clientid_busy;
3204 goto out;
3205 }
3206 status = mark_client_expired_locked(conf);
3207 if (status)
3208 goto out;
3209 clp = conf;
3210 } else if (unconf)
3211 clp = unconf;
3212 else {
3213 status = nfserr_stale_clientid;
3214 goto out;
3215 }
3216 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3217 clp = NULL;
3218 status = nfserr_wrong_cred;
3219 goto out;
3220 }
3221 unhash_client_locked(clp);
3222 out:
3223 spin_unlock(&nn->client_lock);
3224 if (clp)
3225 expire_client(clp);
3226 return status;
3227 }
3228
3229 __be32
3230 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3231 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3232 {
3233 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3234 __be32 status = 0;
3235
3236 if (rc->rca_one_fs) {
3237 if (!cstate->current_fh.fh_dentry)
3238 return nfserr_nofilehandle;
3239 /*
3240 * We don't take advantage of the rca_one_fs case.
3241 * That's OK, it's optional, we can safely ignore it.
3242 */
3243 return nfs_ok;
3244 }
3245
3246 status = nfserr_complete_already;
3247 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3248 &cstate->session->se_client->cl_flags))
3249 goto out;
3250
3251 status = nfserr_stale_clientid;
3252 if (is_client_expired(cstate->session->se_client))
3253 /*
3254 * The following error isn't really legal.
3255 * But we only get here if the client has just explicitly
3256 * destroyed itself. Surely it no longer cares what
3257 * error it gets back on an operation for the dead
3258 * client.
3259 */
3260 goto out;
3261
3262 status = nfs_ok;
3263 nfsd4_client_record_create(cstate->session->se_client);
3264 out:
3265 return status;
3266 }
3267
3268 __be32
3269 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3270 union nfsd4_op_u *u)
3271 {
3272 struct nfsd4_setclientid *setclid = &u->setclientid;
3273 struct xdr_netobj clname = setclid->se_name;
3274 nfs4_verifier clverifier = setclid->se_verf;
3275 struct nfs4_client *conf, *new;
3276 struct nfs4_client *unconf = NULL;
3277 __be32 status;
3278 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3279
3280 new = create_client(clname, rqstp, &clverifier);
3281 if (new == NULL)
3282 return nfserr_jukebox;
3283 /* Cases below refer to rfc 3530 section 14.2.33: */
3284 spin_lock(&nn->client_lock);
3285 conf = find_confirmed_client_by_name(&clname, nn);
3286 if (conf && client_has_state(conf)) {
3287 /* case 0: */
3288 status = nfserr_clid_inuse;
3289 if (clp_used_exchangeid(conf))
3290 goto out;
3291 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3292 char addr_str[INET6_ADDRSTRLEN];
3293 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3294 sizeof(addr_str));
3295 dprintk("NFSD: setclientid: string in use by client "
3296 "at %s\n", addr_str);
3297 goto out;
3298 }
3299 }
3300 unconf = find_unconfirmed_client_by_name(&clname, nn);
3301 if (unconf)
3302 unhash_client_locked(unconf);
3303 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3304 /* case 1: probable callback update */
3305 copy_clid(new, conf);
3306 gen_confirm(new, nn);
3307 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3308 gen_clid(new, nn);
3309 new->cl_minorversion = 0;
3310 gen_callback(new, setclid, rqstp);
3311 add_to_unconfirmed(new);
3312 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3313 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3314 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3315 new = NULL;
3316 status = nfs_ok;
3317 out:
3318 spin_unlock(&nn->client_lock);
3319 if (new)
3320 free_client(new);
3321 if (unconf)
3322 expire_client(unconf);
3323 return status;
3324 }
3325
3326
3327 __be32
3328 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3329 struct nfsd4_compound_state *cstate,
3330 union nfsd4_op_u *u)
3331 {
3332 struct nfsd4_setclientid_confirm *setclientid_confirm =
3333 &u->setclientid_confirm;
3334 struct nfs4_client *conf, *unconf;
3335 struct nfs4_client *old = NULL;
3336 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3337 clientid_t * clid = &setclientid_confirm->sc_clientid;
3338 __be32 status;
3339 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3340
3341 if (STALE_CLIENTID(clid, nn))
3342 return nfserr_stale_clientid;
3343
3344 spin_lock(&nn->client_lock);
3345 conf = find_confirmed_client(clid, false, nn);
3346 unconf = find_unconfirmed_client(clid, false, nn);
3347 /*
3348 * We try hard to give out unique clientid's, so if we get an
3349 * attempt to confirm the same clientid with a different cred,
3350 * the client may be buggy; this should never happen.
3351 *
3352 * Nevertheless, RFC 7530 recommends INUSE for this case:
3353 */
3354 status = nfserr_clid_inuse;
3355 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3356 goto out;
3357 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3358 goto out;
3359 /* cases below refer to rfc 3530 section 14.2.34: */
3360 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3361 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3362 /* case 2: probable retransmit */
3363 status = nfs_ok;
3364 } else /* case 4: client hasn't noticed we rebooted yet? */
3365 status = nfserr_stale_clientid;
3366 goto out;
3367 }
3368 status = nfs_ok;
3369 if (conf) { /* case 1: callback update */
3370 old = unconf;
3371 unhash_client_locked(old);
3372 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3373 } else { /* case 3: normal case; new or rebooted client */
3374 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3375 if (old) {
3376 status = nfserr_clid_inuse;
3377 if (client_has_state(old)
3378 && !same_creds(&unconf->cl_cred,
3379 &old->cl_cred))
3380 goto out;
3381 status = mark_client_expired_locked(old);
3382 if (status) {
3383 old = NULL;
3384 goto out;
3385 }
3386 }
3387 move_to_confirmed(unconf);
3388 conf = unconf;
3389 }
3390 get_client_locked(conf);
3391 spin_unlock(&nn->client_lock);
3392 nfsd4_probe_callback(conf);
3393 spin_lock(&nn->client_lock);
3394 put_client_renew_locked(conf);
3395 out:
3396 spin_unlock(&nn->client_lock);
3397 if (old)
3398 expire_client(old);
3399 return status;
3400 }
3401
3402 static struct nfs4_file *nfsd4_alloc_file(void)
3403 {
3404 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3405 }
3406
3407 /* OPEN Share state helper functions */
3408 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3409 struct nfs4_file *fp)
3410 {
3411 lockdep_assert_held(&state_lock);
3412
3413 refcount_set(&fp->fi_ref, 1);
3414 spin_lock_init(&fp->fi_lock);
3415 INIT_LIST_HEAD(&fp->fi_stateids);
3416 INIT_LIST_HEAD(&fp->fi_delegations);
3417 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3418 fh_copy_shallow(&fp->fi_fhandle, fh);
3419 fp->fi_deleg_file = NULL;
3420 fp->fi_had_conflict = false;
3421 fp->fi_share_deny = 0;
3422 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3423 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3424 #ifdef CONFIG_NFSD_PNFS
3425 INIT_LIST_HEAD(&fp->fi_lo_states);
3426 atomic_set(&fp->fi_lo_recalls, 0);
3427 #endif
3428 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3429 }
3430
3431 void
3432 nfsd4_free_slabs(void)
3433 {
3434 kmem_cache_destroy(odstate_slab);
3435 kmem_cache_destroy(openowner_slab);
3436 kmem_cache_destroy(lockowner_slab);
3437 kmem_cache_destroy(file_slab);
3438 kmem_cache_destroy(stateid_slab);
3439 kmem_cache_destroy(deleg_slab);
3440 }
3441
3442 int
3443 nfsd4_init_slabs(void)
3444 {
3445 openowner_slab = kmem_cache_create("nfsd4_openowners",
3446 sizeof(struct nfs4_openowner), 0, 0, NULL);
3447 if (openowner_slab == NULL)
3448 goto out;
3449 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3450 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3451 if (lockowner_slab == NULL)
3452 goto out_free_openowner_slab;
3453 file_slab = kmem_cache_create("nfsd4_files",
3454 sizeof(struct nfs4_file), 0, 0, NULL);
3455 if (file_slab == NULL)
3456 goto out_free_lockowner_slab;
3457 stateid_slab = kmem_cache_create("nfsd4_stateids",
3458 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3459 if (stateid_slab == NULL)
3460 goto out_free_file_slab;
3461 deleg_slab = kmem_cache_create("nfsd4_delegations",
3462 sizeof(struct nfs4_delegation), 0, 0, NULL);
3463 if (deleg_slab == NULL)
3464 goto out_free_stateid_slab;
3465 odstate_slab = kmem_cache_create("nfsd4_odstate",
3466 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3467 if (odstate_slab == NULL)
3468 goto out_free_deleg_slab;
3469 return 0;
3470
3471 out_free_deleg_slab:
3472 kmem_cache_destroy(deleg_slab);
3473 out_free_stateid_slab:
3474 kmem_cache_destroy(stateid_slab);
3475 out_free_file_slab:
3476 kmem_cache_destroy(file_slab);
3477 out_free_lockowner_slab:
3478 kmem_cache_destroy(lockowner_slab);
3479 out_free_openowner_slab:
3480 kmem_cache_destroy(openowner_slab);
3481 out:
3482 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3483 return -ENOMEM;
3484 }
3485
3486 static void init_nfs4_replay(struct nfs4_replay *rp)
3487 {
3488 rp->rp_status = nfserr_serverfault;
3489 rp->rp_buflen = 0;
3490 rp->rp_buf = rp->rp_ibuf;
3491 mutex_init(&rp->rp_mutex);
3492 }
3493
3494 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3495 struct nfs4_stateowner *so)
3496 {
3497 if (!nfsd4_has_session(cstate)) {
3498 mutex_lock(&so->so_replay.rp_mutex);
3499 cstate->replay_owner = nfs4_get_stateowner(so);
3500 }
3501 }
3502
3503 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3504 {
3505 struct nfs4_stateowner *so = cstate->replay_owner;
3506
3507 if (so != NULL) {
3508 cstate->replay_owner = NULL;
3509 mutex_unlock(&so->so_replay.rp_mutex);
3510 nfs4_put_stateowner(so);
3511 }
3512 }
3513
3514 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3515 {
3516 struct nfs4_stateowner *sop;
3517
3518 sop = kmem_cache_alloc(slab, GFP_KERNEL);
3519 if (!sop)
3520 return NULL;
3521
3522 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3523 if (!sop->so_owner.data) {
3524 kmem_cache_free(slab, sop);
3525 return NULL;
3526 }
3527 sop->so_owner.len = owner->len;
3528
3529 INIT_LIST_HEAD(&sop->so_stateids);
3530 sop->so_client = clp;
3531 init_nfs4_replay(&sop->so_replay);
3532 atomic_set(&sop->so_count, 1);
3533 return sop;
3534 }
3535
3536 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3537 {
3538 lockdep_assert_held(&clp->cl_lock);
3539
3540 list_add(&oo->oo_owner.so_strhash,
3541 &clp->cl_ownerstr_hashtbl[strhashval]);
3542 list_add(&oo->oo_perclient, &clp->cl_openowners);
3543 }
3544
3545 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3546 {
3547 unhash_openowner_locked(openowner(so));
3548 }
3549
3550 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3551 {
3552 struct nfs4_openowner *oo = openowner(so);
3553
3554 kmem_cache_free(openowner_slab, oo);
3555 }
3556
3557 static const struct nfs4_stateowner_operations openowner_ops = {
3558 .so_unhash = nfs4_unhash_openowner,
3559 .so_free = nfs4_free_openowner,
3560 };
3561
3562 static struct nfs4_ol_stateid *
3563 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3564 {
3565 struct nfs4_ol_stateid *local, *ret = NULL;
3566 struct nfs4_openowner *oo = open->op_openowner;
3567
3568 lockdep_assert_held(&fp->fi_lock);
3569
3570 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3571 /* ignore lock owners */
3572 if (local->st_stateowner->so_is_open_owner == 0)
3573 continue;
3574 if (local->st_stateowner != &oo->oo_owner)
3575 continue;
3576 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3577 ret = local;
3578 refcount_inc(&ret->st_stid.sc_count);
3579 break;
3580 }
3581 }
3582 return ret;
3583 }
3584
3585 static __be32
3586 nfsd4_verify_open_stid(struct nfs4_stid *s)
3587 {
3588 __be32 ret = nfs_ok;
3589
3590 switch (s->sc_type) {
3591 default:
3592 break;
3593 case NFS4_CLOSED_STID:
3594 case NFS4_CLOSED_DELEG_STID:
3595 ret = nfserr_bad_stateid;
3596 break;
3597 case NFS4_REVOKED_DELEG_STID:
3598 ret = nfserr_deleg_revoked;
3599 }
3600 return ret;
3601 }
3602
3603 /* Lock the stateid st_mutex, and deal with races with CLOSE */
3604 static __be32
3605 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3606 {
3607 __be32 ret;
3608
3609 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
3610 ret = nfsd4_verify_open_stid(&stp->st_stid);
3611 if (ret != nfs_ok)
3612 mutex_unlock(&stp->st_mutex);
3613 return ret;
3614 }
3615
3616 static struct nfs4_ol_stateid *
3617 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3618 {
3619 struct nfs4_ol_stateid *stp;
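/* Retry if the open stateid we find is closed before we can lock it. */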
3620 for (;;) {
3621 spin_lock(&fp->fi_lock);
3622 stp = nfsd4_find_existing_open(fp, open);
3623 spin_unlock(&fp->fi_lock);
3624 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3625 break;
3626 nfs4_put_stid(&stp->st_stid);
3627 }
3628 return stp;
3629 }
3630
3631 static struct nfs4_openowner *
3632 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3633 struct nfsd4_compound_state *cstate)
3634 {
3635 struct nfs4_client *clp = cstate->clp;
3636 struct nfs4_openowner *oo, *ret;
3637
3638 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3639 if (!oo)
3640 return NULL;
3641 oo->oo_owner.so_ops = &openowner_ops;
3642 oo->oo_owner.so_is_open_owner = 1;
3643 oo->oo_owner.so_seqid = open->op_seqid;
3644 oo->oo_flags = 0;
3645 if (nfsd4_has_session(cstate))
3646 oo->oo_flags |= NFS4_OO_CONFIRMED;
3647 oo->oo_time = 0;
3648 oo->oo_last_closed_stid = NULL;
3649 INIT_LIST_HEAD(&oo->oo_close_lru);
3650 spin_lock(&clp->cl_lock);
3651 ret = find_openstateowner_str_locked(strhashval, open, clp);
3652 if (ret == NULL) {
3653 hash_openowner(oo, clp, strhashval);
3654 ret = oo;
3655 } else
3656 nfs4_free_stateowner(&oo->oo_owner);
3657
3658 spin_unlock(&clp->cl_lock);
3659 return ret;
3660 }
3661
3662 static struct nfs4_ol_stateid *
3663 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3664 {
3665
3666 struct nfs4_openowner *oo = open->op_openowner;
3667 struct nfs4_ol_stateid *retstp = NULL;
3668 struct nfs4_ol_stateid *stp;
3669
3670 stp = open->op_stp;
3671 /* Initialize and lock st_mutex before taking the spinlocks below: a mutex may sleep, so it must not be acquired under them */
3672 mutex_init(&stp->st_mutex);
3673 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
3674
3675 retry:
3676 spin_lock(&oo->oo_owner.so_client->cl_lock);
3677 spin_lock(&fp->fi_lock);
3678
3679 retstp = nfsd4_find_existing_open(fp, open);
3680 if (retstp)
3681 goto out_unlock;
3682
3683 open->op_stp = NULL;
3684 refcount_inc(&stp->st_stid.sc_count);
3685 stp->st_stid.sc_type = NFS4_OPEN_STID;
3686 INIT_LIST_HEAD(&stp->st_locks);
3687 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3688 get_nfs4_file(fp);
3689 stp->st_stid.sc_file = fp;
3690 stp->st_access_bmap = 0;
3691 stp->st_deny_bmap = 0;
3692 stp->st_openstp = NULL;
3693 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3694 list_add(&stp->st_perfile, &fp->fi_stateids);
3695
3696 out_unlock:
3697 spin_unlock(&fp->fi_lock);
3698 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3699 if (retstp) {
3700 /* Handle races with CLOSE */
3701 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3702 nfs4_put_stid(&retstp->st_stid);
3703 goto retry;
3704 }
3705 /* To keep mutex tracking happy */
3706 mutex_unlock(&stp->st_mutex);
3707 stp = retstp;
3708 }
3709 return stp;
3710 }
3711
3712 /*
3713 * In the 4.0 case we need to keep the owners around a little while to handle
3714 * CLOSE replay. We still do need to release any file access that is held by
3715 * them before returning however.
3716 */
3717 static void
3718 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3719 {
3720 struct nfs4_ol_stateid *last;
3721 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3722 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3723 nfsd_net_id);
3724
3725 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3726
3727 /*
3728 * We know that we hold one reference via nfsd4_close, and another
3729 * "persistent" reference for the client. If the refcount is higher
3730 * than 2, then there are still calls in progress that are using this
3731 * stateid. We can't put the sc_file reference until they are finished.
3732 * Wait for the refcount to drop to 2. Since it has been unhashed,
3733 * there should be no danger of the refcount going back up again at
3734 * this point.
3735 */
3736 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
3737
3738 release_all_access(s);
3739 if (s->st_stid.sc_file) {
3740 put_nfs4_file(s->st_stid.sc_file);
3741 s->st_stid.sc_file = NULL;
3742 }
3743
3744 spin_lock(&nn->client_lock);
3745 last = oo->oo_last_closed_stid;
3746 oo->oo_last_closed_stid = s;
3747 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3748 oo->oo_time = get_seconds();
3749 spin_unlock(&nn->client_lock);
3750 if (last)
3751 nfs4_put_stid(&last->st_stid);
3752 }
3753
3754 /* search file_hashtbl[] for file */
3755 static struct nfs4_file *
3756 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3757 {
3758 struct nfs4_file *fp;
3759
3760 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3761 if (fh_match(&fp->fi_fhandle, fh)) {
3762 if (refcount_inc_not_zero(&fp->fi_ref))
3763 return fp;
3764 }
3765 }
3766 return NULL;
3767 }
3768
3769 struct nfs4_file *
3770 find_file(struct knfsd_fh *fh)
3771 {
3772 struct nfs4_file *fp;
3773 unsigned int hashval = file_hashval(fh);
3774
3775 rcu_read_lock();
3776 fp = find_file_locked(fh, hashval);
3777 rcu_read_unlock();
3778 return fp;
3779 }
3780
3781 static struct nfs4_file *
3782 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3783 {
3784 struct nfs4_file *fp;
3785 unsigned int hashval = file_hashval(fh);
3786
3787 rcu_read_lock();
3788 fp = find_file_locked(fh, hashval);
3789 rcu_read_unlock();
3790 if (fp)
3791 return fp;
3792
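/* Not found under RCU; recheck under state_lock so two racing opens cannot both insert the file. */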
3793 spin_lock(&state_lock);
3794 fp = find_file_locked(fh, hashval);
3795 if (likely(fp == NULL)) {
3796 nfsd4_init_file(fh, hashval, new);
3797 fp = new;
3798 }
3799 spin_unlock(&state_lock);
3800
3801 return fp;
3802 }
3803
3804 /*
3805 * Called to check deny when READ with all zero stateid or
3806 * WRITE with all zero or all one stateid
3807 */
3808 static __be32
3809 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3810 {
3811 struct nfs4_file *fp;
3812 __be32 ret = nfs_ok;
3813
3814 fp = find_file(&current_fh->fh_handle);
3815 if (!fp)
3816 return ret;
3817 /* Check for conflicting share reservations */
3818 spin_lock(&fp->fi_lock);
3819 if (fp->fi_share_deny & deny_type)
3820 ret = nfserr_locked;
3821 spin_unlock(&fp->fi_lock);
3822 put_nfs4_file(fp);
3823 return ret;
3824 }
3825
3826 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3827 {
3828 struct nfs4_delegation *dp = cb_to_delegation(cb);
3829 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3830 nfsd_net_id);
3831
3832 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3833
3834 /*
3835 * We can't do this in nfsd_break_deleg_cb because it is
3836 * already holding inode->i_lock.
3837 *
3838 * If the dl_time != 0, then we know that it has already been
3839 * queued for a lease break. Don't queue it again.
3840 */
3841 spin_lock(&state_lock);
3842 if (dp->dl_time == 0) {
3843 dp->dl_time = get_seconds();
3844 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3845 }
3846 spin_unlock(&state_lock);
3847 }
3848
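/*
 * Completion handler for the CB_RECALL RPC. The return value follows
 * the convention that the callback dispatch code (nfsd4_cb_done() in
 * nfs4callback.c) expects: 1 means the callback is finished, 0 means
 * the RPC should be restarted (here, after a 2-second rpc_delay()),
 * and -1 means give up and mark the callback path down.
 */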
3849 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3850 struct rpc_task *task)
3851 {
3852 struct nfs4_delegation *dp = cb_to_delegation(cb);
3853
3854 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3855 return 1;
3856
3857 switch (task->tk_status) {
3858 case 0:
3859 return 1;
3860 case -EBADHANDLE:
3861 case -NFS4ERR_BAD_STATEID:
3862 /*
3863 * Race: client probably got cb_recall before open reply
3864 * granting delegation.
3865 */
3866 if (dp->dl_retries--) {
3867 rpc_delay(task, 2 * HZ);
3868 return 0;
3869 }
3870 /*FALLTHRU*/
3871 default:
3872 return -1;
3873 }
3874 }
3875
3876 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3877 {
3878 struct nfs4_delegation *dp = cb_to_delegation(cb);
3879
3880 nfs4_put_stid(&dp->dl_stid);
3881 }
3882
3883 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3884 .prepare = nfsd4_cb_recall_prepare,
3885 .done = nfsd4_cb_recall_done,
3886 .release = nfsd4_cb_recall_release,
3887 };
3888
3889 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3890 {
3891 /*
3892 * We're assuming the state code never drops its reference
3893 * without first removing the lease. Since we're in this lease
3894 * callback (and since the lease code is serialized by the kernel
3895 * lock) we know the server hasn't removed the lease yet, so we
3896 * know it's safe to take a reference.
3897 */
3898 refcount_inc(&dp->dl_stid.sc_count);
3899 nfsd4_run_cb(&dp->dl_recall);
3900 }
3901
3902 /* Called from break_lease() with i_lock held. */
3903 static bool
3904 nfsd_break_deleg_cb(struct file_lock *fl)
3905 {
3906 bool ret = false;
3907 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3908 struct nfs4_delegation *dp;
3909
3910 if (!fp) {
3911 WARN(1, "(%p)->fl_owner NULL\n", fl);
3912 return ret;
3913 }
3914 if (fp->fi_had_conflict) {
3915 WARN(1, "duplicate break on %p\n", fp);
3916 return ret;
3917 }
3918 /*
3919 * We don't want the locks code to time out the lease for us;
3920 * we'll remove it ourselves if a delegation isn't returned
3921 * in time:
3922 */
3923 fl->fl_break_time = 0;
3924
3925 spin_lock(&fp->fi_lock);
3926 fp->fi_had_conflict = true;
3927 /*
3928 * If there are no delegations on the list, then return true
3929 * so that the lease code will go ahead and delete it.
3930 */
3931 if (list_empty(&fp->fi_delegations))
3932 ret = true;
3933 else
3934 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3935 nfsd_break_one_deleg(dp);
3936 spin_unlock(&fp->fi_lock);
3937 return ret;
3938 }
3939
3940 static int
3941 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3942 struct list_head *dispose)
3943 {
3944 if (arg & F_UNLCK)
3945 return lease_modify(onlist, arg, dispose);
3946 else
3947 return -EAGAIN;
3948 }
3949
3950 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3951 .lm_break = nfsd_break_deleg_cb,
3952 .lm_change = nfsd_change_deleg_cb,
3953 };
3954
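/*
 * NFSv4.0 seqid rules; sessions (v4.1+) make them moot. For example,
 * with so_seqid == 5: a request carrying seqid 4 is a retransmission
 * and is answered from the replay cache (nfserr_replay_me), seqid 5 is
 * the expected next request, and anything else is nfserr_bad_seqid.
 */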
3955 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3956 {
3957 if (nfsd4_has_session(cstate))
3958 return nfs_ok;
3959 if (seqid == so->so_seqid - 1)
3960 return nfserr_replay_me;
3961 if (seqid == so->so_seqid)
3962 return nfs_ok;
3963 return nfserr_bad_seqid;
3964 }
3965
3966 static __be32 lookup_clientid(clientid_t *clid,
3967 struct nfsd4_compound_state *cstate,
3968 struct nfsd_net *nn)
3969 {
3970 struct nfs4_client *found;
3971
3972 if (cstate->clp) {
3973 found = cstate->clp;
3974 if (!same_clid(&found->cl_clientid, clid))
3975 return nfserr_stale_clientid;
3976 return nfs_ok;
3977 }
3978
3979 if (STALE_CLIENTID(clid, nn))
3980 return nfserr_stale_clientid;
3981
3982 /*
3983 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3984 * cached already then we know this is for v4.0 and "sessions"
3985 * will be false.
3986 */
3987 WARN_ON_ONCE(cstate->session);
3988 spin_lock(&nn->client_lock);
3989 found = find_confirmed_client(clid, false, nn);
3990 if (!found) {
3991 spin_unlock(&nn->client_lock);
3992 return nfserr_expired;
3993 }
3994 atomic_inc(&found->cl_refcount);
3995 spin_unlock(&nn->client_lock);
3996
3997 /* Cache the nfs4_client in cstate! */
3998 cstate->clp = found;
3999 return nfs_ok;
4000 }
4001
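/*
 * First phase of OPEN processing: sanity-check the clientid, find or
 * create the openowner, and preallocate everything the second phase
 * may need (nfs4_file, open stateid and, on pNFS exports, the client
 * odstate), so that a later allocation failure can't leave the open
 * half-applied.
 */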
4002 __be32
4003 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4004 struct nfsd4_open *open, struct nfsd_net *nn)
4005 {
4006 clientid_t *clientid = &open->op_clientid;
4007 struct nfs4_client *clp = NULL;
4008 unsigned int strhashval;
4009 struct nfs4_openowner *oo = NULL;
4010 __be32 status;
4011
4012 if (STALE_CLIENTID(&open->op_clientid, nn))
4013 return nfserr_stale_clientid;
4014 /*
4015 * In case we need it later, after we've already created the
4016 * file and don't want to risk a further failure:
4017 */
4018 open->op_file = nfsd4_alloc_file();
4019 if (open->op_file == NULL)
4020 return nfserr_jukebox;
4021
4022 status = lookup_clientid(clientid, cstate, nn);
4023 if (status)
4024 return status;
4025 clp = cstate->clp;
4026
4027 strhashval = ownerstr_hashval(&open->op_owner);
4028 oo = find_openstateowner_str(strhashval, open, clp);
4029 open->op_openowner = oo;
4030 if (!oo) {
4031 goto new_owner;
4032 }
4033 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4034 /* Replace unconfirmed owners without checking for replay. */
4035 release_openowner(oo);
4036 open->op_openowner = NULL;
4037 goto new_owner;
4038 }
4039 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4040 if (status)
4041 return status;
4042 goto alloc_stateid;
4043 new_owner:
4044 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4045 if (oo == NULL)
4046 return nfserr_jukebox;
4047 open->op_openowner = oo;
4048 alloc_stateid:
4049 open->op_stp = nfs4_alloc_open_stateid(clp);
4050 if (!open->op_stp)
4051 return nfserr_jukebox;
4052
4053 if (nfsd4_has_session(cstate) &&
4054 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4055 open->op_odstate = alloc_clnt_odstate(clp);
4056 if (!open->op_odstate)
4057 return nfserr_jukebox;
4058 }
4059
4060 return nfs_ok;
4061 }
4062
4063 static inline __be32
4064 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4065 {
4066 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4067 return nfserr_openmode;
4068 else
4069 return nfs_ok;
4070 }
4071
4072 static int share_access_to_flags(u32 share_access)
4073 {
4074 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4075 }
4076
4077 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4078 {
4079 struct nfs4_stid *ret;
4080
4081 ret = find_stateid_by_type(cl, s,
4082 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4083 if (!ret)
4084 return NULL;
4085 return delegstateid(ret);
4086 }
4087
4088 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4089 {
4090 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4091 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4092 }
4093
4094 static __be32
4095 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4096 struct nfs4_delegation **dp)
4097 {
4098 int flags;
4099 __be32 status = nfserr_bad_stateid;
4100 struct nfs4_delegation *deleg;
4101
4102 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4103 if (deleg == NULL)
4104 goto out;
4105 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4106 nfs4_put_stid(&deleg->dl_stid);
4107 if (cl->cl_minorversion)
4108 status = nfserr_deleg_revoked;
4109 goto out;
4110 }
4111 flags = share_access_to_flags(open->op_share_access);
4112 status = nfs4_check_delegmode(deleg, flags);
4113 if (status) {
4114 nfs4_put_stid(&deleg->dl_stid);
4115 goto out;
4116 }
4117 *dp = deleg;
4118 out:
4119 if (!nfsd4_is_deleg_cur(open))
4120 return nfs_ok;
4121 if (status)
4122 return status;
4123 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4124 return nfs_ok;
4125 }
4126
4127 static inline int nfs4_access_to_access(u32 nfs4_access)
4128 {
4129 int flags = 0;
4130
4131 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4132 flags |= NFSD_MAY_READ;
4133 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4134 flags |= NFSD_MAY_WRITE;
4135 return flags;
4136 }
4137
4138 static inline __be32
4139 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4140 struct nfsd4_open *open)
4141 {
4142 struct iattr iattr = {
4143 .ia_valid = ATTR_SIZE,
4144 .ia_size = 0,
4145 };
4146 if (!open->op_truncate)
4147 return 0;
4148 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4149 return nfserr_inval;
4150 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4151 }
4152
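/*
 * Set up file access for an OPEN: check the requested deny mode against
 * current access, take a reference for the requested access mode, record
 * both in the stateid's bitmaps, and open a struct file for this access
 * mode if one isn't cached yet. On failure after the access reference has
 * been taken, out_put_access rolls the bitmaps and the reference back.
 */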
4153 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4154 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4155 struct nfsd4_open *open)
4156 {
4157 struct file *filp = NULL;
4158 __be32 status;
4159 int oflag = nfs4_access_to_omode(open->op_share_access);
4160 int access = nfs4_access_to_access(open->op_share_access);
4161 unsigned char old_access_bmap, old_deny_bmap;
4162
4163 spin_lock(&fp->fi_lock);
4164
4165 /*
4166 * Are we trying to set a deny mode that would conflict with
4167 * current access?
4168 */
4169 status = nfs4_file_check_deny(fp, open->op_share_deny);
4170 if (status != nfs_ok) {
4171 spin_unlock(&fp->fi_lock);
4172 goto out;
4173 }
4174
4175 /* set access to the file */
4176 status = nfs4_file_get_access(fp, open->op_share_access);
4177 if (status != nfs_ok) {
4178 spin_unlock(&fp->fi_lock);
4179 goto out;
4180 }
4181
4182 /* Set access bits in stateid */
4183 old_access_bmap = stp->st_access_bmap;
4184 set_access(open->op_share_access, stp);
4185
4186 /* Set new deny mask */
4187 old_deny_bmap = stp->st_deny_bmap;
4188 set_deny(open->op_share_deny, stp);
4189 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4190
4191 if (!fp->fi_fds[oflag]) {
4192 spin_unlock(&fp->fi_lock);
4193 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4194 if (status)
4195 goto out_put_access;
4196 spin_lock(&fp->fi_lock);
4197 if (!fp->fi_fds[oflag]) {
4198 fp->fi_fds[oflag] = filp;
4199 filp = NULL;
4200 }
4201 }
4202 spin_unlock(&fp->fi_lock);
4203 if (filp)
4204 fput(filp);
4205
4206 status = nfsd4_truncate(rqstp, cur_fh, open);
4207 if (status)
4208 goto out_put_access;
4209 out:
4210 return status;
4211 out_put_access:
4212 stp->st_access_bmap = old_access_bmap;
4213 nfs4_file_put_access(fp, open->op_share_access);
4214 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4215 goto out;
4216 }
4217
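/*
 * OPEN upgrade: the open stateid already exists, and the result is the
 * union of the old and new share modes. If the requested access mode
 * isn't held yet we fall back to the full nfs4_get_vfs_file() path;
 * otherwise only the new deny mode needs to be tested and recorded.
 */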
4218 static __be32
4219 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4220 {
4221 __be32 status;
4222 unsigned char old_deny_bmap = stp->st_deny_bmap;
4223
4224 if (!test_access(open->op_share_access, stp))
4225 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4226
4227 /* test and set deny mode */
4228 spin_lock(&fp->fi_lock);
4229 status = nfs4_file_check_deny(fp, open->op_share_deny);
4230 if (status == nfs_ok) {
4231 set_deny(open->op_share_deny, stp);
4232 fp->fi_share_deny |=
4233 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4234 }
4235 spin_unlock(&fp->fi_lock);
4236
4237 if (status != nfs_ok)
4238 return status;
4239
4240 status = nfsd4_truncate(rqstp, cur_fh, open);
4241 if (status != nfs_ok)
4242 reset_union_bmap_deny(old_deny_bmap, stp);
4243 return status;
4244 }
4245
4246 /* Should we give out recallable state?: */
4247 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4248 {
4249 if (clp->cl_cb_state == NFSD4_CB_UP)
4250 return true;
4251 /*
4252 * In the sessions case, since we don't have to establish a
4253 * separate connection for callbacks, we assume it's OK
4254 * until we hear otherwise:
4255 */
4256 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4257 }
4258
4259 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4260 {
4261 struct file_lock *fl;
4262
4263 fl = locks_alloc_lock();
4264 if (!fl)
4265 return NULL;
4266 fl->fl_lmops = &nfsd_lease_mng_ops;
4267 fl->fl_flags = FL_DELEG;
4268 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4269 fl->fl_end = OFFSET_MAX;
4270 fl->fl_owner = (fl_owner_t)fp;
4271 fl->fl_pid = current->tgid;
4272 return fl;
4273 }
4274
4275 /**
4276 * nfs4_setlease - Obtain a delegation by requesting a lease from the vfs layer
4277 * @dp: a pointer to the nfs4_delegation we're adding.
4278 *
4279 * Return:
4280 * On success: 0.
4281 *
4282 * On error: -EAGAIN if there was an existing delegation;
4283 * another nonzero error code for any other failure.
4284 *
4285 */
4286
4287 static int nfs4_setlease(struct nfs4_delegation *dp)
4288 {
4289 struct nfs4_file *fp = dp->dl_stid.sc_file;
4290 struct file_lock *fl;
4291 struct file *filp;
4292 int status = 0;
4293
4294 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
4295 if (!fl)
4296 return -ENOMEM;
4297 filp = find_readable_file(fp);
4298 if (!filp) {
4299 /* We should always have a readable file here */
4300 WARN_ON_ONCE(1);
4301 locks_free_lock(fl);
4302 return -EBADF;
4303 }
4304 fl->fl_file = filp;
4305 status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
4306 if (fl)
4307 locks_free_lock(fl);
4308 if (status)
4309 goto out_fput;
4310 spin_lock(&state_lock);
4311 spin_lock(&fp->fi_lock);
4312 /* Did the lease get broken before we took the lock? */
4313 status = -EAGAIN;
4314 if (fp->fi_had_conflict)
4315 goto out_unlock;
4316 /* Race breaker */
4317 if (fp->fi_deleg_file) {
4318 status = hash_delegation_locked(dp, fp);
4319 goto out_unlock;
4320 }
4321 fp->fi_deleg_file = filp;
4322 fp->fi_delegees = 0;
4323 status = hash_delegation_locked(dp, fp);
4324 spin_unlock(&fp->fi_lock);
4325 spin_unlock(&state_lock);
4326 if (status) {
4327 /* Should never happen; this is a new fi_deleg_file */
4328 WARN_ON_ONCE(1);
4329 goto out_fput;
4330 }
4331 return 0;
4332 out_unlock:
4333 spin_unlock(&fp->fi_lock);
4334 spin_unlock(&state_lock);
4335 out_fput:
4336 fput(filp);
4337 return status;
4338 }
4339
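/*
 * Note fi_had_conflict is tested twice: once optimistically before
 * allocating the delegation, and again under state_lock/fi_lock (here
 * or inside nfs4_setlease()) before hashing it, since a lease break
 * can race with us at any point in between.
 */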
4340 static struct nfs4_delegation *
4341 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4342 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4343 {
4344 int status;
4345 struct nfs4_delegation *dp;
4346
4347 if (fp->fi_had_conflict)
4348 return ERR_PTR(-EAGAIN);
4349
4350 spin_lock(&state_lock);
4351 spin_lock(&fp->fi_lock);
4352 status = nfs4_get_existing_delegation(clp, fp);
4353 spin_unlock(&fp->fi_lock);
4354 spin_unlock(&state_lock);
4355
4356 if (status)
4357 return ERR_PTR(status);
4358
4359 dp = alloc_init_deleg(clp, fh, odstate);
4360 if (!dp)
4361 return ERR_PTR(-ENOMEM);
4362
4363 get_nfs4_file(fp);
4364 spin_lock(&state_lock);
4365 spin_lock(&fp->fi_lock);
4366 dp->dl_stid.sc_file = fp;
4367 if (!fp->fi_deleg_file) {
4368 spin_unlock(&fp->fi_lock);
4369 spin_unlock(&state_lock);
4370 status = nfs4_setlease(dp);
4371 goto out;
4372 }
4373 if (fp->fi_had_conflict) {
4374 status = -EAGAIN;
4375 goto out_unlock;
4376 }
4377 status = hash_delegation_locked(dp, fp);
4378 out_unlock:
4379 spin_unlock(&fp->fi_lock);
4380 spin_unlock(&state_lock);
4381 out:
4382 if (status) {
4383 put_clnt_odstate(dp->dl_clnt_odstate);
4384 nfs4_put_stid(&dp->dl_stid);
4385 return ERR_PTR(status);
4386 }
4387 return dp;
4388 }
4389
4390 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4391 {
4392 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4393 if (status == -EAGAIN)
4394 open->op_why_no_deleg = WND4_CONTENTION;
4395 else {
4396 open->op_why_no_deleg = WND4_RESOURCE;
4397 switch (open->op_deleg_want) {
4398 case NFS4_SHARE_WANT_READ_DELEG:
4399 case NFS4_SHARE_WANT_WRITE_DELEG:
4400 case NFS4_SHARE_WANT_ANY_DELEG:
4401 break;
4402 case NFS4_SHARE_WANT_CANCEL:
4403 open->op_why_no_deleg = WND4_CANCELLED;
4404 break;
4405 case NFS4_SHARE_WANT_NO_DELEG:
4406 WARN_ON_ONCE(1);
4407 }
4408 }
4409 }
4410
4411 /*
4412 * Attempt to hand out a delegation.
4413 *
4414 * Note we don't support write delegations, and won't until the vfs has
4415 * proper support for them.
4416 */
4417 static void
4418 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4419 struct nfs4_ol_stateid *stp)
4420 {
4421 struct nfs4_delegation *dp;
4422 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4423 struct nfs4_client *clp = stp->st_stid.sc_client;
4424 int cb_up;
4425 int status = 0;
4426
4427 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4428 open->op_recall = 0;
4429 switch (open->op_claim_type) {
4430 case NFS4_OPEN_CLAIM_PREVIOUS:
4431 if (!cb_up)
4432 open->op_recall = 1;
4433 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4434 goto out_no_deleg;
4435 break;
4436 case NFS4_OPEN_CLAIM_NULL:
4437 case NFS4_OPEN_CLAIM_FH:
4438 /*
4439 * Let's not give out any delegations till everyone's
4440 * had the chance to reclaim theirs, *and* until
4441 * NLM locks have all been reclaimed:
4442 */
4443 if (locks_in_grace(clp->net))
4444 goto out_no_deleg;
4445 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4446 goto out_no_deleg;
4447 /*
4448 * Also, if the file was opened for write or
4449 * create, there's a good chance the client's
4450 * about to write to it, resulting in an
4451 * immediate recall (since we don't support
4452 * write delegations):
4453 */
4454 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4455 goto out_no_deleg;
4456 if (open->op_create == NFS4_OPEN_CREATE)
4457 goto out_no_deleg;
4458 break;
4459 default:
4460 goto out_no_deleg;
4461 }
4462 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4463 if (IS_ERR(dp))
4464 goto out_no_deleg;
4465
4466 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4467
4468 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4469 STATEID_VAL(&dp->dl_stid.sc_stateid));
4470 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4471 nfs4_put_stid(&dp->dl_stid);
4472 return;
4473 out_no_deleg:
4474 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4475 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4476 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4477 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4478 open->op_recall = 1;
4479 }
4480
4481 /* 4.1 client asking for a delegation? */
4482 if (open->op_deleg_want)
4483 nfsd4_open_deleg_none_ext(open, status);
4484 return;
4485 }
4486
4487 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4488 struct nfs4_delegation *dp)
4489 {
4490 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4491 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4492 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4493 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4494 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4495 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4496 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4497 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4498 }
4499 /* Otherwise the client must be confused, wanting a delegation
4500 * it already has; in that case we don't return
4501 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
4502 */
4503 }
4504
4505 __be32
4506 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4507 {
4508 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4509 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4510 struct nfs4_file *fp = NULL;
4511 struct nfs4_ol_stateid *stp = NULL;
4512 struct nfs4_delegation *dp = NULL;
4513 __be32 status;
4514 bool new_stp = false;
4515
4516 /*
4517 * Look up the file; if found, look up the stateid, check the open
4518 * request, and check for delegations in the process of being recalled.
4519 * If not found, create the nfs4_file struct.
4520 */
4521 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4522 if (fp != open->op_file) {
4523 status = nfs4_check_deleg(cl, open, &dp);
4524 if (status)
4525 goto out;
4526 stp = nfsd4_find_and_lock_existing_open(fp, open);
4527 } else {
4528 open->op_file = NULL;
4529 status = nfserr_bad_stateid;
4530 if (nfsd4_is_deleg_cur(open))
4531 goto out;
4532 }
4533
4534 if (!stp) {
4535 stp = init_open_stateid(fp, open);
4536 if (!open->op_stp)
4537 new_stp = true;
4538 }
4539
4540 /*
4541 * OPEN the file, or upgrade an existing OPEN.
4542 * If truncate fails, the OPEN fails.
4543 *
4544 * stp is already locked.
4545 */
4546 if (!new_stp) {
4547 /* Stateid was found, this is an OPEN upgrade */
4548 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4549 if (status) {
4550 mutex_unlock(&stp->st_mutex);
4551 goto out;
4552 }
4553 } else {
4554 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4555 if (status) {
4556 stp->st_stid.sc_type = NFS4_CLOSED_STID;
4557 release_open_stateid(stp);
4558 mutex_unlock(&stp->st_mutex);
4559 goto out;
4560 }
4561
4562 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4563 open->op_odstate);
4564 if (stp->st_clnt_odstate == open->op_odstate)
4565 open->op_odstate = NULL;
4566 }
4567
4568 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4569 mutex_unlock(&stp->st_mutex);
4570
4571 if (nfsd4_has_session(&resp->cstate)) {
4572 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4573 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4574 open->op_why_no_deleg = WND4_NOT_WANTED;
4575 goto nodeleg;
4576 }
4577 }
4578
4579 /*
4580 * Attempt to hand out a delegation. No error return, because the
4581 * OPEN succeeds even if we fail.
4582 */
4583 nfs4_open_delegation(current_fh, open, stp);
4584 nodeleg:
4585 status = nfs_ok;
4586
4587 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4588 STATEID_VAL(&stp->st_stid.sc_stateid));
4589 out:
4590 /* 4.1 client trying to upgrade/downgrade delegation? */
4591 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4592 open->op_deleg_want)
4593 nfsd4_deleg_xgrade_none_ext(open, dp);
4594
4595 if (fp)
4596 put_nfs4_file(fp);
4597 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4598 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4599 /*
4600 * To finish the open response, we just need to set the rflags.
4601 */
4602 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4603 if (nfsd4_has_session(&resp->cstate))
4604 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4605 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4606 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4607
4608 if (dp)
4609 nfs4_put_stid(&dp->dl_stid);
4610 if (stp)
4611 nfs4_put_stid(&stp->st_stid);
4612
4613 return status;
4614 }
4615
4616 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4617 struct nfsd4_open *open)
4618 {
4619 if (open->op_openowner) {
4620 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4621
4622 nfsd4_cstate_assign_replay(cstate, so);
4623 nfs4_put_stateowner(so);
4624 }
4625 if (open->op_file)
4626 kmem_cache_free(file_slab, open->op_file);
4627 if (open->op_stp)
4628 nfs4_put_stid(&open->op_stp->st_stid);
4629 if (open->op_odstate)
4630 kmem_cache_free(odstate_slab, open->op_odstate);
4631 }
4632
4633 __be32
4634 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4635 union nfsd4_op_u *u)
4636 {
4637 clientid_t *clid = &u->renew;
4638 struct nfs4_client *clp;
4639 __be32 status;
4640 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4641
4642 dprintk("process_renew(%08x/%08x): starting\n",
4643 clid->cl_boot, clid->cl_id);
4644 status = lookup_clientid(clid, cstate, nn);
4645 if (status)
4646 goto out;
4647 clp = cstate->clp;
4648 status = nfserr_cb_path_down;
4649 if (!list_empty(&clp->cl_delegations)
4650 && clp->cl_cb_state != NFSD4_CB_UP)
4651 goto out;
4652 status = nfs_ok;
4653 out:
4654 return status;
4655 }
4656
4657 void
4658 nfsd4_end_grace(struct nfsd_net *nn)
4659 {
4660 /* do nothing if grace period already ended */
4661 if (nn->grace_ended)
4662 return;
4663
4664 dprintk("NFSD: end of grace period\n");
4665 nn->grace_ended = true;
4666 /*
4667 * If the server goes down again right now, an NFSv4
4668 * client will still be allowed to reclaim after it comes back up,
4669 * even if it hasn't yet had a chance to reclaim state this time.
4671 */
4672 nfsd4_record_grace_done(nn);
4673 /*
4674 * At this point, NFSv4 clients can still reclaim. But if the
4675 * server crashes, any that have not yet reclaimed will be out
4676 * of luck on the next boot.
4677 *
4678 * (NFSv4.1+ clients are considered to have reclaimed once they
4679 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4680 * have reclaimed after their first OPEN.)
4681 */
4682 locks_end_grace(&nn->nfsd4_manager);
4683 /*
4684 * At this point, and once lockd and/or any other containers
4685 * exit their grace period, further reclaims will fail and
4686 * regular locking can resume.
4687 */
4688 }
4689
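/*
 * The laundromat walks the per-net LRU lists (clients, delegations on
 * the recall list, v4.0 openowners awaiting CLOSE replay, and blocked
 * lock requests) and expires every entry older than one lease period.
 * It returns the number of seconds until the next entry is due, clamped
 * to at least NFSD_LAUNDROMAT_MINTIMEOUT, which laundromat_main() uses
 * to schedule the next run.
 */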
4690 static time_t
4691 nfs4_laundromat(struct nfsd_net *nn)
4692 {
4693 struct nfs4_client *clp;
4694 struct nfs4_openowner *oo;
4695 struct nfs4_delegation *dp;
4696 struct nfs4_ol_stateid *stp;
4697 struct nfsd4_blocked_lock *nbl;
4698 struct list_head *pos, *next, reaplist;
4699 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4700 time_t t, new_timeo = nn->nfsd4_lease;
4701
4702 dprintk("NFSD: laundromat service - starting\n");
4703 nfsd4_end_grace(nn);
4704 INIT_LIST_HEAD(&reaplist);
4705 spin_lock(&nn->client_lock);
4706 list_for_each_safe(pos, next, &nn->client_lru) {
4707 clp = list_entry(pos, struct nfs4_client, cl_lru);
4708 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4709 t = clp->cl_time - cutoff;
4710 new_timeo = min(new_timeo, t);
4711 break;
4712 }
4713 if (mark_client_expired_locked(clp)) {
4714 dprintk("NFSD: client in use (clientid %08x)\n",
4715 clp->cl_clientid.cl_id);
4716 continue;
4717 }
4718 list_add(&clp->cl_lru, &reaplist);
4719 }
4720 spin_unlock(&nn->client_lock);
4721 list_for_each_safe(pos, next, &reaplist) {
4722 clp = list_entry(pos, struct nfs4_client, cl_lru);
4723 dprintk("NFSD: purging unused client (clientid %08x)\n",
4724 clp->cl_clientid.cl_id);
4725 list_del_init(&clp->cl_lru);
4726 expire_client(clp);
4727 }
4728 spin_lock(&state_lock);
4729 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4730 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4731 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4732 t = dp->dl_time - cutoff;
4733 new_timeo = min(new_timeo, t);
4734 break;
4735 }
4736 WARN_ON(!unhash_delegation_locked(dp));
4737 list_add(&dp->dl_recall_lru, &reaplist);
4738 }
4739 spin_unlock(&state_lock);
4740 while (!list_empty(&reaplist)) {
4741 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4742 dl_recall_lru);
4743 list_del_init(&dp->dl_recall_lru);
4744 revoke_delegation(dp);
4745 }
4746
4747 spin_lock(&nn->client_lock);
4748 while (!list_empty(&nn->close_lru)) {
4749 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4750 oo_close_lru);
4751 if (time_after((unsigned long)oo->oo_time,
4752 (unsigned long)cutoff)) {
4753 t = oo->oo_time - cutoff;
4754 new_timeo = min(new_timeo, t);
4755 break;
4756 }
4757 list_del_init(&oo->oo_close_lru);
4758 stp = oo->oo_last_closed_stid;
4759 oo->oo_last_closed_stid = NULL;
4760 spin_unlock(&nn->client_lock);
4761 nfs4_put_stid(&stp->st_stid);
4762 spin_lock(&nn->client_lock);
4763 }
4764 spin_unlock(&nn->client_lock);
4765
4766 /*
4767 * It's possible for a client to try and acquire an already held lock
4768 * that is being held for a long time, and then lose interest in it.
4769 * So, we clean out any un-revisited request after a lease period
4770 * under the assumption that the client is no longer interested.
4771 *
4772 * RFC5661, sec. 9.6 states that the client must not rely on getting
4773 * notifications and must continue to poll for locks, even when the
4774 * server supports them. Thus this shouldn't lead to clients blocking
4775 * indefinitely once the lock does become free.
4776 */
4777 BUG_ON(!list_empty(&reaplist));
4778 spin_lock(&nn->blocked_locks_lock);
4779 while (!list_empty(&nn->blocked_locks_lru)) {
4780 nbl = list_first_entry(&nn->blocked_locks_lru,
4781 struct nfsd4_blocked_lock, nbl_lru);
4782 if (time_after((unsigned long)nbl->nbl_time,
4783 (unsigned long)cutoff)) {
4784 t = nbl->nbl_time - cutoff;
4785 new_timeo = min(new_timeo, t);
4786 break;
4787 }
4788 list_move(&nbl->nbl_lru, &reaplist);
4789 list_del_init(&nbl->nbl_list);
4790 }
4791 spin_unlock(&nn->blocked_locks_lock);
4792
4793 while (!list_empty(&reaplist)) {
4794 nbl = list_first_entry(&reaplist,
4795 struct nfsd4_blocked_lock, nbl_lru);
4796 list_del_init(&nbl->nbl_lru);
4797 posix_unblock_lock(&nbl->nbl_lock);
4798 free_blocked_lock(nbl);
4799 }
4800
4801 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4802 return new_timeo;
4803 }
4804
4805 static struct workqueue_struct *laundry_wq;
4806 static void laundromat_main(struct work_struct *);
4807
4808 static void
4809 laundromat_main(struct work_struct *laundry)
4810 {
4811 time_t t;
4812 struct delayed_work *dwork = to_delayed_work(laundry);
4813 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4814 laundromat_work);
4815
4816 t = nfs4_laundromat(nn);
4817 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4818 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4819 }
4820
4821 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4822 {
4823 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4824 return nfserr_bad_stateid;
4825 return nfs_ok;
4826 }
4827
4828 static inline int
4829 access_permit_read(struct nfs4_ol_stateid *stp)
4830 {
4831 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4832 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4833 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4834 }
4835
4836 static inline int
4837 access_permit_write(struct nfs4_ol_stateid *stp)
4838 {
4839 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4840 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4841 }
4842
4843 static __be32
4844 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4845 {
4846 __be32 status = nfserr_openmode;
4847
4848 /* For lock stateids, we test the parent open, not the lock: */
4849 if (stp->st_openstp)
4850 stp = stp->st_openstp;
4851 if ((flags & WR_STATE) && !access_permit_write(stp))
4852 goto out;
4853 if ((flags & RD_STATE) && !access_permit_read(stp))
4854 goto out;
4855 status = nfs_ok;
4856 out:
4857 return status;
4858 }
4859
4860 static inline __be32
4861 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4862 {
4863 if (ONE_STATEID(stateid) && (flags & RD_STATE))
4864 return nfs_ok;
4865 else if (opens_in_grace(net)) {
4866 /* The answer in the remaining cases depends on the existence of
4867 * conflicting state, so we must wait out the grace period. */
4868 return nfserr_grace;
4869 } else if (flags & WR_STATE)
4870 return nfs4_share_conflict(current_fh,
4871 NFS4_SHARE_DENY_WRITE);
4872 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4873 return nfs4_share_conflict(current_fh,
4874 NFS4_SHARE_DENY_READ);
4875 }
4876
4877 /*
4878 * Allow READ/WRITE during grace period on recovered state only for files
4879 * that are not able to provide mandatory locking.
4880 */
4881 static inline int
4882 grace_disallows_io(struct net *net, struct inode *inode)
4883 {
4884 return opens_in_grace(net) && mandatory_lock(inode);
4885 }
4886
4887 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4888 {
4889 /*
4890 * When sessions are used the stateid generation number is ignored
4891 * when it is zero.
4892 */
4893 if (has_session && in->si_generation == 0)
4894 return nfs_ok;
4895
4896 if (in->si_generation == ref->si_generation)
4897 return nfs_ok;
4898
4899 /* If the client sends us a stateid from the future, it's buggy: */
4900 if (nfsd4_stateid_generation_after(in, ref))
4901 return nfserr_bad_stateid;
4902 /*
4903 * However, we could see a stateid from the past, even from a
4904 * non-buggy client. For example, if the client sends a lock
4905 * while some IO is outstanding, the lock may bump si_generation
4906 * while the IO is still in flight. The client could avoid that
4907 * situation by waiting for responses on all the IO requests,
4908 * but it may get better performance by simply retrying IO that
4909 * receives an old_stateid error, as long as requests are rarely
4910 * reordered in flight:
4911 */
4912 return nfserr_old_stateid;
4913 }
4914
4915 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
4916 {
4917 __be32 ret;
4918
4919 spin_lock(&s->sc_lock);
4920 ret = nfsd4_verify_open_stid(s);
4921 if (ret == nfs_ok)
4922 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
4923 spin_unlock(&s->sc_lock);
4924 return ret;
4925 }
4926
4927 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4928 {
4929 if (ols->st_stateowner->so_is_open_owner &&
4930 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4931 return nfserr_bad_stateid;
4932 return nfs_ok;
4933 }
4934
4935 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4936 {
4937 struct nfs4_stid *s;
4938 __be32 status = nfserr_bad_stateid;
4939
4940 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4941 CLOSE_STATEID(stateid))
4942 return status;
4943 /* Client debugging aid. */
4944 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4945 char addr_str[INET6_ADDRSTRLEN];
4946 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4947 sizeof(addr_str));
4948 pr_warn_ratelimited("NFSD: client %s testing state ID "
4949 "with incorrect client ID\n", addr_str);
4950 return status;
4951 }
4952 spin_lock(&cl->cl_lock);
4953 s = find_stateid_locked(cl, stateid);
4954 if (!s)
4955 goto out_unlock;
4956 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
4957 if (status)
4958 goto out_unlock;
4959 switch (s->sc_type) {
4960 case NFS4_DELEG_STID:
4961 status = nfs_ok;
4962 break;
4963 case NFS4_REVOKED_DELEG_STID:
4964 status = nfserr_deleg_revoked;
4965 break;
4966 case NFS4_OPEN_STID:
4967 case NFS4_LOCK_STID:
4968 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4969 break;
4970 default:
4971 printk("unknown stateid type %x\n", s->sc_type);
4972 /* Fallthrough */
4973 case NFS4_CLOSED_STID:
4974 case NFS4_CLOSED_DELEG_STID:
4975 status = nfserr_bad_stateid;
4976 }
4977 out_unlock:
4978 spin_unlock(&cl->cl_lock);
4979 return status;
4980 }
4981
4982 __be32
4983 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4984 stateid_t *stateid, unsigned char typemask,
4985 struct nfs4_stid **s, struct nfsd_net *nn)
4986 {
4987 __be32 status;
4988 bool return_revoked = false;
4989
4990 /*
4991 * Only return revoked delegations if explicitly asked;
4992 * otherwise we report revoked or bad_stateid status.
4993 */
4994 if (typemask & NFS4_REVOKED_DELEG_STID)
4995 return_revoked = true;
4996 else if (typemask & NFS4_DELEG_STID)
4997 typemask |= NFS4_REVOKED_DELEG_STID;
4998
4999 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5000 CLOSE_STATEID(stateid))
5001 return nfserr_bad_stateid;
5002 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5003 if (status == nfserr_stale_clientid) {
5004 if (cstate->session)
5005 return nfserr_bad_stateid;
5006 return nfserr_stale_stateid;
5007 }
5008 if (status)
5009 return status;
5010 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5011 if (!*s)
5012 return nfserr_bad_stateid;
5013 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5014 nfs4_put_stid(*s);
5015 if (cstate->minorversion)
5016 return nfserr_deleg_revoked;
5017 return nfserr_bad_stateid;
5018 }
5019 return nfs_ok;
5020 }
5021
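/*
 * Map a stateid to an open struct file: a delegation stateid carries
 * its own fi_deleg_file, while open and lock stateids pick a cached
 * readable or writeable file according to the access being requested.
 */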
5022 static struct file *
5023 nfs4_find_file(struct nfs4_stid *s, int flags)
5024 {
5025 if (!s)
5026 return NULL;
5027
5028 switch (s->sc_type) {
5029 case NFS4_DELEG_STID:
5030 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5031 return NULL;
5032 return get_file(s->sc_file->fi_deleg_file);
5033 case NFS4_OPEN_STID:
5034 case NFS4_LOCK_STID:
5035 if (flags & RD_STATE)
5036 return find_readable_file(s->sc_file);
5037 else
5038 return find_writeable_file(s->sc_file);
5039 break;
5040 }
5041
5042 return NULL;
5043 }
5044
5045 static __be32
5046 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5047 {
5048 __be32 status;
5049
5050 status = nfsd4_check_openowner_confirmed(ols);
5051 if (status)
5052 return status;
5053 return nfs4_check_openmode(ols, flags);
5054 }
5055
5056 static __be32
5057 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5058 struct file **filpp, bool *tmp_file, int flags)
5059 {
5060 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5061 struct file *file;
5062 __be32 status;
5063
5064 file = nfs4_find_file(s, flags);
5065 if (file) {
5066 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5067 acc | NFSD_MAY_OWNER_OVERRIDE);
5068 if (status) {
5069 fput(file);
5070 return status;
5071 }
5072
5073 *filpp = file;
5074 } else {
5075 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5076 if (status)
5077 return status;
5078
5079 if (tmp_file)
5080 *tmp_file = true;
5081 }
5082
5083 return 0;
5084 }
5085
5086 /*
5087 * Checks for stateid operations
5088 */
5089 __be32
5090 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5091 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5092 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5093 {
5094 struct inode *ino = d_inode(fhp->fh_dentry);
5095 struct net *net = SVC_NET(rqstp);
5096 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5097 struct nfs4_stid *s = NULL;
5098 __be32 status;
5099
5100 if (filpp)
5101 *filpp = NULL;
5102 if (tmp_file)
5103 *tmp_file = false;
5104
5105 if (grace_disallows_io(net, ino))
5106 return nfserr_grace;
5107
5108 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5109 status = check_special_stateids(net, fhp, stateid, flags);
5110 goto done;
5111 }
5112
5113 status = nfsd4_lookup_stateid(cstate, stateid,
5114 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5115 &s, nn);
5116 if (status)
5117 return status;
5118 status = nfsd4_stid_check_stateid_generation(stateid, s,
5119 nfsd4_has_session(cstate));
5120 if (status)
5121 goto out;
5122
5123 switch (s->sc_type) {
5124 case NFS4_DELEG_STID:
5125 status = nfs4_check_delegmode(delegstateid(s), flags);
5126 break;
5127 case NFS4_OPEN_STID:
5128 case NFS4_LOCK_STID:
5129 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5130 break;
5131 default:
5132 status = nfserr_bad_stateid;
5133 break;
5134 }
5135 if (status)
5136 goto out;
5137 status = nfs4_check_fh(fhp, s);
5138
5139 done:
5140 if (!status && filpp)
5141 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5142 out:
5143 if (s)
5144 nfs4_put_stid(s);
5145 return status;
5146 }
5147
5148 /*
5149 * Test if the stateid is valid
5150 */
5151 __be32
5152 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5153 union nfsd4_op_u *u)
5154 {
5155 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5156 struct nfsd4_test_stateid_id *stateid;
5157 struct nfs4_client *cl = cstate->session->se_client;
5158
5159 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5160 stateid->ts_id_status =
5161 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5162
5163 return nfs_ok;
5164 }
5165
5166 static __be32
5167 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5168 {
5169 struct nfs4_ol_stateid *stp = openlockstateid(s);
5170 __be32 ret;
5171
5172 ret = nfsd4_lock_ol_stateid(stp);
5173 if (ret)
5174 goto out_put_stid;
5175
5176 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5177 if (ret)
5178 goto out;
5179
5180 ret = nfserr_locks_held;
5181 if (check_for_locks(stp->st_stid.sc_file,
5182 lockowner(stp->st_stateowner)))
5183 goto out;
5184
5185 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5186 release_lock_stateid(stp);
5187 ret = nfs_ok;
5188
5189 out:
5190 mutex_unlock(&stp->st_mutex);
5191 out_put_stid:
5192 nfs4_put_stid(s);
5193 return ret;
5194 }
5195
5196 __be32
5197 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5198 union nfsd4_op_u *u)
5199 {
5200 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5201 stateid_t *stateid = &free_stateid->fr_stateid;
5202 struct nfs4_stid *s;
5203 struct nfs4_delegation *dp;
5204 struct nfs4_client *cl = cstate->session->se_client;
5205 __be32 ret = nfserr_bad_stateid;
5206
5207 spin_lock(&cl->cl_lock);
5208 s = find_stateid_locked(cl, stateid);
5209 if (!s)
5210 goto out_unlock;
5211 spin_lock(&s->sc_lock);
5212 switch (s->sc_type) {
5213 case NFS4_DELEG_STID:
5214 ret = nfserr_locks_held;
5215 break;
5216 case NFS4_OPEN_STID:
5217 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5218 if (ret)
5219 break;
5220 ret = nfserr_locks_held;
5221 break;
5222 case NFS4_LOCK_STID:
5223 spin_unlock(&s->sc_lock);
5224 refcount_inc(&s->sc_count);
5225 spin_unlock(&cl->cl_lock);
5226 ret = nfsd4_free_lock_stateid(stateid, s);
5227 goto out;
5228 case NFS4_REVOKED_DELEG_STID:
5229 spin_unlock(&s->sc_lock);
5230 dp = delegstateid(s);
5231 list_del_init(&dp->dl_recall_lru);
5232 spin_unlock(&cl->cl_lock);
5233 nfs4_put_stid(s);
5234 ret = nfs_ok;
5235 goto out;
5236 /* Default falls through and returns nfserr_bad_stateid */
5237 }
5238 spin_unlock(&s->sc_lock);
5239 out_unlock:
5240 spin_unlock(&cl->cl_lock);
5241 out:
5242 return ret;
5243 }
5244
5245 static inline int
5246 setlkflg(int type)
5247 {
5248 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5249 RD_STATE : WR_STATE;
5250 }
5251
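/*
 * Common checks for seqid-mutating operations: verify the seqid, take
 * the stateid's st_mutex, then check the stateid generation and that
 * the stateid matches the current filehandle. On success st_mutex is
 * left held and the caller is responsible for unlocking it.
 */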
5252 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5253 {
5254 struct svc_fh *current_fh = &cstate->current_fh;
5255 struct nfs4_stateowner *sop = stp->st_stateowner;
5256 __be32 status;
5257
5258 status = nfsd4_check_seqid(cstate, sop, seqid);
5259 if (status)
5260 return status;
5261 status = nfsd4_lock_ol_stateid(stp);
5262 if (status != nfs_ok)
5263 return status;
5264 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5265 if (status == nfs_ok)
5266 status = nfs4_check_fh(current_fh, &stp->st_stid);
5267 if (status != nfs_ok)
5268 mutex_unlock(&stp->st_mutex);
5269 return status;
5270 }
5271
5272 /*
5273 * Checks for sequence id mutating operations.
5274 */
5275 static __be32
5276 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5277 stateid_t *stateid, char typemask,
5278 struct nfs4_ol_stateid **stpp,
5279 struct nfsd_net *nn)
5280 {
5281 __be32 status;
5282 struct nfs4_stid *s;
5283 struct nfs4_ol_stateid *stp = NULL;
5284
5285 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5286 seqid, STATEID_VAL(stateid));
5287
5288 *stpp = NULL;
5289 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5290 if (status)
5291 return status;
5292 stp = openlockstateid(s);
5293 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5294
5295 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5296 if (!status)
5297 *stpp = stp;
5298 else
5299 nfs4_put_stid(&stp->st_stid);
5300 return status;
5301 }
5302
5303 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5304 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5305 {
5306 __be32 status;
5307 struct nfs4_openowner *oo;
5308 struct nfs4_ol_stateid *stp;
5309
5310 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5311 NFS4_OPEN_STID, &stp, nn);
5312 if (status)
5313 return status;
5314 oo = openowner(stp->st_stateowner);
5315 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5316 mutex_unlock(&stp->st_mutex);
5317 nfs4_put_stid(&stp->st_stid);
5318 return nfserr_bad_stateid;
5319 }
5320 *stpp = stp;
5321 return nfs_ok;
5322 }
5323
5324 __be32
5325 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5326 union nfsd4_op_u *u)
5327 {
5328 struct nfsd4_open_confirm *oc = &u->open_confirm;
5329 __be32 status;
5330 struct nfs4_openowner *oo;
5331 struct nfs4_ol_stateid *stp;
5332 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5333
5334 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5335 cstate->current_fh.fh_dentry);
5336
5337 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5338 if (status)
5339 return status;
5340
5341 status = nfs4_preprocess_seqid_op(cstate,
5342 oc->oc_seqid, &oc->oc_req_stateid,
5343 NFS4_OPEN_STID, &stp, nn);
5344 if (status)
5345 goto out;
5346 oo = openowner(stp->st_stateowner);
5347 status = nfserr_bad_stateid;
5348 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5349 mutex_unlock(&stp->st_mutex);
5350 goto put_stateid;
5351 }
5352 oo->oo_flags |= NFS4_OO_CONFIRMED;
5353 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5354 mutex_unlock(&stp->st_mutex);
5355 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5356 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5357
5358 nfsd4_client_record_create(oo->oo_owner.so_client);
5359 status = nfs_ok;
5360 put_stateid:
5361 nfs4_put_stid(&stp->st_stid);
5362 out:
5363 nfsd4_bump_seqid(cstate, status);
5364 return status;
5365 }
5366
5367 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5368 {
5369 if (!test_access(access, stp))
5370 return;
5371 nfs4_file_put_access(stp->st_stid.sc_file, access);
5372 clear_access(access, stp);
5373 }
5374
5375 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5376 {
5377 switch (to_access) {
5378 case NFS4_SHARE_ACCESS_READ:
5379 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5380 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5381 break;
5382 case NFS4_SHARE_ACCESS_WRITE:
5383 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5384 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5385 break;
5386 case NFS4_SHARE_ACCESS_BOTH:
5387 break;
5388 default:
5389 WARN_ON_ONCE(1);
5390 }
5391 }
5392
5393 __be32
5394 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5395 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5396 {
5397 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5398 __be32 status;
5399 struct nfs4_ol_stateid *stp;
5400 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5401
5402 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5403 cstate->current_fh.fh_dentry);
5404
5405 /* We don't yet support WANT bits: */
5406 if (od->od_deleg_want)
5407 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5408 od->od_deleg_want);
5409
5410 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5411 &od->od_stateid, &stp, nn);
5412 if (status)
5413 goto out;
5414 status = nfserr_inval;
5415 if (!test_access(od->od_share_access, stp)) {
5416 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5417 stp->st_access_bmap, od->od_share_access);
5418 goto put_stateid;
5419 }
5420 if (!test_deny(od->od_share_deny, stp)) {
5421 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5422 stp->st_deny_bmap, od->od_share_deny);
5423 goto put_stateid;
5424 }
5425 nfs4_stateid_downgrade(stp, od->od_share_access);
5426 reset_union_bmap_deny(od->od_share_deny, stp);
5427 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5428 status = nfs_ok;
5429 put_stateid:
5430 mutex_unlock(&stp->st_mutex);
5431 nfs4_put_stid(&stp->st_stid);
5432 out:
5433 nfsd4_bump_seqid(cstate, status);
5434 return status;
5435 }
5436
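/*
 * Unhash an open stateid on CLOSE. For v4.1+ clients the stateid can
 * be freed immediately; for v4.0 it is parked on the close LRU instead,
 * so that the openowner stays around long enough to answer a CLOSE
 * replay.
 */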
5437 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5438 {
5439 struct nfs4_client *clp = s->st_stid.sc_client;
5440 bool unhashed;
5441 LIST_HEAD(reaplist);
5442
5443 spin_lock(&clp->cl_lock);
5444 unhashed = unhash_open_stateid(s, &reaplist);
5445
5446 if (clp->cl_minorversion) {
5447 if (unhashed)
5448 put_ol_stateid_locked(s, &reaplist);
5449 spin_unlock(&clp->cl_lock);
5450 free_ol_stateid_reaplist(&reaplist);
5451 } else {
5452 spin_unlock(&clp->cl_lock);
5453 free_ol_stateid_reaplist(&reaplist);
5454 if (unhashed)
5455 move_to_close_lru(s, clp->net);
5456 }
5457 }
5458
5459 /*
5460 * nfs4_unlock_state() called after encode
5461 */
5462 __be32
5463 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5464 union nfsd4_op_u *u)
5465 {
5466 struct nfsd4_close *close = &u->close;
5467 __be32 status;
5468 struct nfs4_ol_stateid *stp;
5469 struct net *net = SVC_NET(rqstp);
5470 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5471
5472 dprintk("NFSD: nfsd4_close on file %pd\n",
5473 cstate->current_fh.fh_dentry);
5474
5475 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5476 &close->cl_stateid,
5477 NFS4_OPEN_STID|NFS4_CLOSED_STID,
5478 &stp, nn);
5479 nfsd4_bump_seqid(cstate, status);
5480 if (status)
5481 goto out;
5482
5483 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5484 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5485
5486 nfsd4_close_open_stateid(stp);
5487 mutex_unlock(&stp->st_mutex);
5488
5489 /* See RFC5661 section 18.2.4 */
5490 if (stp->st_stid.sc_client->cl_minorversion)
5491 memcpy(&close->cl_stateid, &close_stateid,
5492 sizeof(close->cl_stateid));
5493
5494 /* put reference from nfs4_preprocess_seqid_op */
5495 nfs4_put_stid(&stp->st_stid);
5496 out:
5497 return status;
5498 }
5499
5500 __be32
5501 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5502 union nfsd4_op_u *u)
5503 {
5504 struct nfsd4_delegreturn *dr = &u->delegreturn;
5505 struct nfs4_delegation *dp;
5506 stateid_t *stateid = &dr->dr_stateid;
5507 struct nfs4_stid *s;
5508 __be32 status;
5509 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5510
5511 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5512 return status;
5513
5514 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5515 if (status)
5516 goto out;
5517 dp = delegstateid(s);
5518 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
5519 if (status)
5520 goto put_stateid;
5521
5522 destroy_delegation(dp);
5523 put_stateid:
5524 nfs4_put_stid(&dp->dl_stid);
5525 out:
5526 return status;
5527 }
5528
5529 static inline u64
5530 end_offset(u64 start, u64 len)
5531 {
5532 u64 end;
5533
5534 end = start + len;
5535 return end >= start ? end : NFS4_MAX_UINT64;
5536 }
5537
5538 /* last octet in a range */
5539 static inline u64
5540 last_byte_offset(u64 start, u64 len)
5541 {
5542 u64 end;
5543
5544 WARN_ON_ONCE(!len);
5545 end = start + len;
5546 return end > start ? end - 1 : NFS4_MAX_UINT64;
5547 }
5548
5549 /*
5550 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5551 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5552 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
5553 * locking, this prevents us from being completely protocol-compliant. The
5554 * real solution to this problem is to start using unsigned file offsets in
5555 * the VFS, but this is a very deep change!
5556 */
5557 static inline void
5558 nfs4_transform_lock_offset(struct file_lock *lock)
5559 {
5560 if (lock->fl_start < 0)
5561 lock->fl_start = OFFSET_MAX;
5562 if (lock->fl_end < 0)
5563 lock->fl_end = OFFSET_MAX;
5564 }
5565
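/*
 * The VFS locks code copies file_locks around internally; lm_get_owner
 * and lm_put_owner keep the nfs4_lockowner's reference count in step
 * with the number of file_lock copies that point at it.
 */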
5566 static fl_owner_t
5567 nfsd4_fl_get_owner(fl_owner_t owner)
5568 {
5569 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5570
5571 nfs4_get_stateowner(&lo->lo_owner);
5572 return owner;
5573 }
5574
5575 static void
5576 nfsd4_fl_put_owner(fl_owner_t owner)
5577 {
5578 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5579
5580 if (lo)
5581 nfs4_put_stateowner(&lo->lo_owner);
5582 }
5583
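/*
 * lm_notify callback: a lock that a client is waiting on has become
 * available. If the blocked-lock entry is still on the owner's lists,
 * take it off and queue its callback (a CB_NOTIFY_LOCK RPC) to let the
 * client know it is worth retrying the LOCK.
 */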
5584 static void
5585 nfsd4_lm_notify(struct file_lock *fl)
5586 {
5587 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
5588 struct net *net = lo->lo_owner.so_client->net;
5589 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5590 struct nfsd4_blocked_lock *nbl = container_of(fl,
5591 struct nfsd4_blocked_lock, nbl_lock);
5592 bool queue = false;
5593
5594 /* An empty list means that something else is going to be using it */
5595 spin_lock(&nn->blocked_locks_lock);
5596 if (!list_empty(&nbl->nbl_list)) {
5597 list_del_init(&nbl->nbl_list);
5598 list_del_init(&nbl->nbl_lru);
5599 queue = true;
5600 }
5601 spin_unlock(&nn->blocked_locks_lock);
5602
5603 if (queue)
5604 nfsd4_run_cb(&nbl->nbl_cb);
5605 }
5606
5607 static const struct lock_manager_operations nfsd_posix_mng_ops = {
5608 .lm_notify = nfsd4_lm_notify,
5609 .lm_get_owner = nfsd4_fl_get_owner,
5610 .lm_put_owner = nfsd4_fl_put_owner,
5611 };
5612
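/*
 * Fill in the LOCK4denied result from a conflicting file_lock. When the
 * conflict comes from another nfsd lockowner we can report its owner and
 * clientid; conflicts from other lock managers (e.g. local POSIX locks)
 * are reported with an anonymous owner.
 */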
5613 static inline void
5614 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5615 {
5616 struct nfs4_lockowner *lo;
5617
5618 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5619 lo = (struct nfs4_lockowner *) fl->fl_owner;
5620 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5621 lo->lo_owner.so_owner.len, GFP_KERNEL);
5622 if (!deny->ld_owner.data)
5623 /* We just don't care that much */
5624 goto nevermind;
5625 deny->ld_owner.len = lo->lo_owner.so_owner.len;
5626 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5627 } else {
5628 nevermind:
5629 deny->ld_owner.len = 0;
5630 deny->ld_owner.data = NULL;
5631 deny->ld_clientid.cl_boot = 0;
5632 deny->ld_clientid.cl_id = 0;
5633 }
5634 deny->ld_start = fl->fl_start;
5635 deny->ld_length = NFS4_MAX_UINT64;
5636 if (fl->fl_end != NFS4_MAX_UINT64)
5637 deny->ld_length = fl->fl_end - fl->fl_start + 1;
5638 deny->ld_type = NFS4_READ_LT;
5639 if (fl->fl_type != F_RDLCK)
5640 deny->ld_type = NFS4_WRITE_LT;
5641 }
5642
5643 static struct nfs4_lockowner *
5644 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5645 {
5646 unsigned int strhashval = ownerstr_hashval(owner);
5647 struct nfs4_stateowner *so;
5648
5649 lockdep_assert_held(&clp->cl_lock);
5650
5651 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5652 so_strhash) {
5653 if (so->so_is_open_owner)
5654 continue;
5655 if (same_owner_str(so, owner))
5656 return lockowner(nfs4_get_stateowner(so));
5657 }
5658 return NULL;
5659 }
5660
5661 static struct nfs4_lockowner *
5662 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5663 {
5664 struct nfs4_lockowner *lo;
5665
5666 spin_lock(&clp->cl_lock);
5667 lo = find_lockowner_str_locked(clp, owner);
5668 spin_unlock(&clp->cl_lock);
5669 return lo;
5670 }
5671
5672 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5673 {
5674 unhash_lockowner_locked(lockowner(sop));
5675 }
5676
5677 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5678 {
5679 struct nfs4_lockowner *lo = lockowner(sop);
5680
5681 kmem_cache_free(lockowner_slab, lo);
5682 }
5683
5684 static const struct nfs4_stateowner_operations lockowner_ops = {
5685 .so_unhash = nfs4_unhash_lockowner,
5686 .so_free = nfs4_free_lockowner,
5687 };
5688
5689 /*
5690 * Allocate a lock owner structure.
5691 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5692 * occurred.
5693 *
5694 * strhashval = ownerstr_hashval
5695 */
5696 static struct nfs4_lockowner *
5697 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5698 struct nfs4_ol_stateid *open_stp,
5699 struct nfsd4_lock *lock)
5700 {
5701 struct nfs4_lockowner *lo, *ret;
5702
5703 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5704 if (!lo)
5705 return NULL;
5706 INIT_LIST_HEAD(&lo->lo_blocked);
5707 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5708 lo->lo_owner.so_is_open_owner = 0;
5709 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5710 lo->lo_owner.so_ops = &lockowner_ops;
5711 spin_lock(&clp->cl_lock);
5712 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5713 if (ret == NULL) {
5714 list_add(&lo->lo_owner.so_strhash,
5715 &clp->cl_ownerstr_hashtbl[strhashval]);
5716 ret = lo;
5717 } else
5718 nfs4_free_stateowner(&lo->lo_owner);
5719
5720 spin_unlock(&clp->cl_lock);
5721 return ret;
5722 }
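/*
 * Editor's note: the function above is the usual optimistic-allocation
 * dance: allocate the new lockowner without holding cl_lock (allocation
 * may sleep), then retake the lock and look again; if another thread won
 * the race, free our copy and return the existing owner (which
 * find_lockowner_str_locked has already referenced). The shape, with
 * hypothetical names:
 *
 *	new = alloc();			// unlocked, may sleep
 *	spin_lock(&l);
 *	old = lookup();			// recheck under the lock
 *	if (!old) {
 *		insert(new);
 *		ret = new;
 *	} else {
 *		free(new);
 *		ret = old;
 *	}
 *	spin_unlock(&l);
 */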
5723
5724 static struct nfs4_ol_stateid *
5725 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5726 {
5727 struct nfs4_ol_stateid *lst;
5728 struct nfs4_client *clp = lo->lo_owner.so_client;
5729
5730 lockdep_assert_held(&clp->cl_lock);
5731
5732 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5733 if (lst->st_stid.sc_type != NFS4_LOCK_STID)
5734 continue;
5735 if (lst->st_stid.sc_file == fp) {
5736 refcount_inc(&lst->st_stid.sc_count);
5737 return lst;
5738 }
5739 }
5740 return NULL;
5741 }
5742
5743 static struct nfs4_ol_stateid *
5744 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5745 struct nfs4_file *fp, struct inode *inode,
5746 struct nfs4_ol_stateid *open_stp)
5747 {
5748 struct nfs4_client *clp = lo->lo_owner.so_client;
5749 struct nfs4_ol_stateid *retstp;
5750
5751 mutex_init(&stp->st_mutex);
5752 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
5753 retry:
5754 spin_lock(&clp->cl_lock);
5755 spin_lock(&fp->fi_lock);
5756 retstp = find_lock_stateid(lo, fp);
5757 if (retstp)
5758 goto out_unlock;
5759
5760 refcount_inc(&stp->st_stid.sc_count);
5761 stp->st_stid.sc_type = NFS4_LOCK_STID;
5762 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5763 get_nfs4_file(fp);
5764 stp->st_stid.sc_file = fp;
5765 stp->st_access_bmap = 0;
5766 stp->st_deny_bmap = open_stp->st_deny_bmap;
5767 stp->st_openstp = open_stp;
5768 list_add(&stp->st_locks, &open_stp->st_locks);
5769 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5770 list_add(&stp->st_perfile, &fp->fi_stateids);
5771 out_unlock:
5772 spin_unlock(&fp->fi_lock);
5773 spin_unlock(&clp->cl_lock);
5774 if (retstp) {
5775 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
5776 nfs4_put_stid(&retstp->st_stid);
5777 goto retry;
5778 }
5779 /* To keep mutex tracking happy */
5780 mutex_unlock(&stp->st_mutex);
5781 stp = retstp;
5782 }
5783 return stp;
5784 }
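/*
 * Editor's note: if find_lock_stateid() above finds a stateid that raced
 * in ahead of us, we try to lock its st_mutex via nfsd4_lock_ol_stateid().
 * That can fail when the stateid is concurrently being torn down, in
 * which case we drop the reference and retry the whole lookup; on
 * success we hand back the already-hashed stateid, and the caller
 * (find_or_create_lock_stateid) drops the unused fresh one.
 */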
5785
5786 static struct nfs4_ol_stateid *
5787 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5788 struct inode *inode, struct nfs4_ol_stateid *ost,
5789 bool *new)
5790 {
5791 struct nfs4_stid *ns = NULL;
5792 struct nfs4_ol_stateid *lst;
5793 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5794 struct nfs4_client *clp = oo->oo_owner.so_client;
5795
5796 *new = false;
5797 spin_lock(&clp->cl_lock);
5798 lst = find_lock_stateid(lo, fi);
5799 spin_unlock(&clp->cl_lock);
5800 if (lst != NULL) {
5801 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
5802 goto out;
5803 nfs4_put_stid(&lst->st_stid);
5804 }
5805 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5806 if (ns == NULL)
5807 return NULL;
5808
5809 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
5810 if (lst == openlockstateid(ns))
5811 *new = true;
5812 else
5813 nfs4_put_stid(ns);
5814 out:
5815 return lst;
5816 }
5817
5818 static int
5819 check_lock_length(u64 offset, u64 length)
5820 {
5821 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5822 (length > ~offset)));
5823 }
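/*
 * Worked example (editor's addition): check_lock_length() rejects ranges
 * that would wrap past the end of the 64-bit offset space. With
 * offset = 0xfffffffffffffff0, ~offset = 0xf, so any length above 15
 * (other than the special "whole file" value NFS4_MAX_UINT64) makes the
 * callers return nfserr_inval; a zero length is always invalid.
 */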
5824
5825 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5826 {
5827 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5828
5829 lockdep_assert_held(&fp->fi_lock);
5830
5831 if (test_access(access, lock_stp))
5832 return;
5833 __nfs4_file_get_access(fp, access);
5834 set_access(access, lock_stp);
5835 }
5836
5837 static __be32
5838 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5839 struct nfs4_ol_stateid *ost,
5840 struct nfsd4_lock *lock,
5841 struct nfs4_ol_stateid **plst, bool *new)
5842 {
5843 __be32 status;
5844 struct nfs4_file *fi = ost->st_stid.sc_file;
5845 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5846 struct nfs4_client *cl = oo->oo_owner.so_client;
5847 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5848 struct nfs4_lockowner *lo;
5849 struct nfs4_ol_stateid *lst;
5850 unsigned int strhashval;
5851
5852 lo = find_lockowner_str(cl, &lock->lk_new_owner);
5853 if (!lo) {
5854 strhashval = ownerstr_hashval(&lock->lk_new_owner);
5855 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5856 if (lo == NULL)
5857 return nfserr_jukebox;
5858 } else {
5859 /* with an existing lockowner, seqids must be the same */
5860 status = nfserr_bad_seqid;
5861 if (!cstate->minorversion &&
5862 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5863 goto out;
5864 }
5865
5866 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5867 if (lst == NULL) {
5868 status = nfserr_jukebox;
5869 goto out;
5870 }
5871
5872 status = nfs_ok;
5873 *plst = lst;
5874 out:
5875 nfs4_put_stateowner(&lo->lo_owner);
5876 return status;
5877 }
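/*
 * Editor's note: for a v4.0 client reusing an existing lockowner, the
 * new-lock seqid must match the owner's current so_seqid; sessions
 * (minorversion >= 1) make owner seqids meaningless, hence the
 * !cstate->minorversion guard above. On every path out, the reference
 * taken by find_lockowner_str()/alloc_init_lock_stateowner() is dropped.
 */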
5878
5879 /*
5880 * LOCK operation
5881 */
5882 __be32
5883 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5884 union nfsd4_op_u *u)
5885 {
5886 struct nfsd4_lock *lock = &u->lock;
5887 struct nfs4_openowner *open_sop = NULL;
5888 struct nfs4_lockowner *lock_sop = NULL;
5889 struct nfs4_ol_stateid *lock_stp = NULL;
5890 struct nfs4_ol_stateid *open_stp = NULL;
5891 struct nfs4_file *fp;
5892 struct file *filp = NULL;
5893 struct nfsd4_blocked_lock *nbl = NULL;
5894 struct file_lock *file_lock = NULL;
5895 struct file_lock *conflock = NULL;
5896 __be32 status = 0;
5897 int lkflg;
5898 int err;
5899 bool new = false;
5900 unsigned char fl_type;
5901 unsigned int fl_flags = FL_POSIX;
5902 struct net *net = SVC_NET(rqstp);
5903 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5904
5905 dprintk("NFSD: nfsd4_lock: start=%lld length=%lld\n",
5906 (long long) lock->lk_offset,
5907 (long long) lock->lk_length);
5908
5909 if (check_lock_length(lock->lk_offset, lock->lk_length))
5910 return nfserr_inval;
5911
5912 if ((status = fh_verify(rqstp, &cstate->current_fh,
5913 S_IFREG, NFSD_MAY_LOCK))) {
5914 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5915 return status;
5916 }
5917
5918 if (lock->lk_is_new) {
5919 if (nfsd4_has_session(cstate))
5920 /* See rfc 5661 18.10.3: given clientid is ignored: */
5921 memcpy(&lock->lk_new_clientid,
5922 &cstate->session->se_client->cl_clientid,
5923 sizeof(clientid_t));
5924
5925 status = nfserr_stale_clientid;
5926 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5927 goto out;
5928
5929 /* validate and update open stateid and open seqid */
5930 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5931 lock->lk_new_open_seqid,
5932 &lock->lk_new_open_stateid,
5933 &open_stp, nn);
5934 if (status)
5935 goto out;
5936 mutex_unlock(&open_stp->st_mutex);
5937 open_sop = openowner(open_stp->st_stateowner);
5938 status = nfserr_bad_stateid;
5939 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5940 &lock->lk_new_clientid))
5941 goto out;
5942 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5943 &lock_stp, &new);
5944 } else {
5945 status = nfs4_preprocess_seqid_op(cstate,
5946 lock->lk_old_lock_seqid,
5947 &lock->lk_old_lock_stateid,
5948 NFS4_LOCK_STID, &lock_stp, nn);
5949 }
5950 if (status)
5951 goto out;
5952 lock_sop = lockowner(lock_stp->st_stateowner);
5953
5954 lkflg = setlkflg(lock->lk_type);
5955 status = nfs4_check_openmode(lock_stp, lkflg);
5956 if (status)
5957 goto out;
5958
5959 status = nfserr_grace;
5960 if (locks_in_grace(net) && !lock->lk_reclaim)
5961 goto out;
5962 status = nfserr_no_grace;
5963 if (!locks_in_grace(net) && lock->lk_reclaim)
5964 goto out;
5965
5966 fp = lock_stp->st_stid.sc_file;
5967 switch (lock->lk_type) {
5968 case NFS4_READW_LT:
5969 if (nfsd4_has_session(cstate))
5970 fl_flags |= FL_SLEEP;
5971 /* Fallthrough */
5972 case NFS4_READ_LT:
5973 spin_lock(&fp->fi_lock);
5974 filp = find_readable_file_locked(fp);
5975 if (filp)
5976 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5977 spin_unlock(&fp->fi_lock);
5978 fl_type = F_RDLCK;
5979 break;
5980 case NFS4_WRITEW_LT:
5981 if (nfsd4_has_session(cstate))
5982 fl_flags |= FL_SLEEP;
5983 /* Fallthrough */
5984 case NFS4_WRITE_LT:
5985 spin_lock(&fp->fi_lock);
5986 filp = find_writeable_file_locked(fp);
5987 if (filp)
5988 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5989 spin_unlock(&fp->fi_lock);
5990 fl_type = F_WRLCK;
5991 break;
5992 default:
5993 status = nfserr_inval;
5994 goto out;
5995 }
5996
5997 if (!filp) {
5998 status = nfserr_openmode;
5999 goto out;
6000 }
6001
6002 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6003 if (!nbl) {
6004 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6005 status = nfserr_jukebox;
6006 goto out;
6007 }
6008
6009 file_lock = &nbl->nbl_lock;
6010 file_lock->fl_type = fl_type;
6011 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6012 file_lock->fl_pid = current->tgid;
6013 file_lock->fl_file = filp;
6014 file_lock->fl_flags = fl_flags;
6015 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6016 file_lock->fl_start = lock->lk_offset;
6017 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6018 nfs4_transform_lock_offset(file_lock);
6019
6020 conflock = locks_alloc_lock();
6021 if (!conflock) {
6022 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6023 status = nfserr_jukebox;
6024 goto out;
6025 }
6026
6027 if (fl_flags & FL_SLEEP) {
6028 nbl->nbl_time = jiffies;
6029 spin_lock(&nn->blocked_locks_lock);
6030 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6031 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6032 spin_unlock(&nn->blocked_locks_lock);
6033 }
6034
6035 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6036 switch (err) {
6037 case 0: /* success! */
6038 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6039 status = 0;
6040 break;
6041 case FILE_LOCK_DEFERRED:
6042 nbl = NULL;
6043 /* Fallthrough */
6044 case -EAGAIN: /* conflock holds conflicting lock */
6045 status = nfserr_denied;
6046 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6047 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6048 break;
6049 case -EDEADLK:
6050 status = nfserr_deadlock;
6051 break;
6052 default:
6053 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
6054 status = nfserrno(err);
6055 break;
6056 }
6057 out:
6058 if (nbl) {
6059 /* dequeue it if we queued it before */
6060 if (fl_flags & FL_SLEEP) {
6061 spin_lock(&nn->blocked_locks_lock);
6062 list_del_init(&nbl->nbl_list);
6063 list_del_init(&nbl->nbl_lru);
6064 spin_unlock(&nn->blocked_locks_lock);
6065 }
6066 free_blocked_lock(nbl);
6067 }
6068 if (filp)
6069 fput(filp);
6070 if (lock_stp) {
6071 /* Bump seqid manually if the 4.0 replay owner is openowner */
6072 if (cstate->replay_owner &&
6073 cstate->replay_owner != &lock_sop->lo_owner &&
6074 seqid_mutating_err(ntohl(status)))
6075 lock_sop->lo_owner.so_seqid++;
6076
6077 /*
6078 * If this is a new, never-before-used stateid, and we are
6079 * returning an error, then just go ahead and release it.
6080 */
6081 if (status && new) {
6082 lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
6083 release_lock_stateid(lock_stp);
6084 }
6085
6086 mutex_unlock(&lock_stp->st_mutex);
6087
6088 nfs4_put_stid(&lock_stp->st_stid);
6089 }
6090 if (open_stp)
6091 nfs4_put_stid(&open_stp->st_stid);
6092 nfsd4_bump_seqid(cstate, status);
6093 if (conflock)
6094 locks_free_lock(conflock);
6095 return status;
6096 }
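/*
 * Editor's note on the blocking-lock flow above: for READW/WRITEW under
 * a session, FL_SLEEP is set and the nfsd4_blocked_lock is queued on the
 * lockowner and the per-net LRU *before* vfs_lock_file(). If the VFS
 * answers FILE_LOCK_DEFERRED, ownership of nbl passes to the
 * lm_notify/laundromat machinery, so nbl is NULLed to keep the cleanup
 * at out: from dequeueing and freeing it; every other outcome falls
 * through to that cleanup.
 */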
6097
6098 /*
6099 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6100 * so we do a temporary open here just to get an open file to pass to
6101 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
6102 * inode operation.)
6103 */
6104 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6105 {
6106 struct file *file;
6107 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6108 if (!err) {
6109 err = nfserrno(vfs_test_lock(file, lock));
6110 fput(file);
6111 }
6112 return err;
6113 }
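/*
 * Editor's note: the temporary open above is a server-internal,
 * read-only open (NFSD_MAY_READ); it exists purely so vfs_test_lock()
 * has a struct file to operate on, and it is closed again before
 * returning whatever the test's outcome.
 */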
6114
6115 /*
6116 * LOCKT operation
6117 */
6118 __be32
6119 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6120 union nfsd4_op_u *u)
6121 {
6122 struct nfsd4_lockt *lockt = &u->lockt;
6123 struct file_lock *file_lock = NULL;
6124 struct nfs4_lockowner *lo = NULL;
6125 __be32 status;
6126 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6127
6128 if (locks_in_grace(SVC_NET(rqstp)))
6129 return nfserr_grace;
6130
6131 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6132 return nfserr_inval;
6133
6134 if (!nfsd4_has_session(cstate)) {
6135 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6136 if (status)
6137 goto out;
6138 }
6139
6140 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6141 goto out;
6142
6143 file_lock = locks_alloc_lock();
6144 if (!file_lock) {
6145 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6146 status = nfserr_jukebox;
6147 goto out;
6148 }
6149
6150 switch (lockt->lt_type) {
6151 case NFS4_READ_LT:
6152 case NFS4_READW_LT:
6153 file_lock->fl_type = F_RDLCK;
6154 break;
6155 case NFS4_WRITE_LT:
6156 case NFS4_WRITEW_LT:
6157 file_lock->fl_type = F_WRLCK;
6158 break;
6159 default:
6160 dprintk("NFSD: nfsd4_lockt: bad lock type!\n");
6161 status = nfserr_inval;
6162 goto out;
6163 }
6164
6165 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6166 if (lo)
6167 file_lock->fl_owner = (fl_owner_t)lo;
6168 file_lock->fl_pid = current->tgid;
6169 file_lock->fl_flags = FL_POSIX;
6170
6171 file_lock->fl_start = lockt->lt_offset;
6172 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6173
6174 nfs4_transform_lock_offset(file_lock);
6175
6176 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6177 if (status)
6178 goto out;
6179
6180 if (file_lock->fl_type != F_UNLCK) {
6181 status = nfserr_denied;
6182 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6183 }
6184 out:
6185 if (lo)
6186 nfs4_put_stateowner(&lo->lo_owner);
6187 if (file_lock)
6188 locks_free_lock(file_lock);
6189 return status;
6190 }
6191
6192 __be32
6193 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6194 union nfsd4_op_u *u)
6195 {
6196 struct nfsd4_locku *locku = &u->locku;
6197 struct nfs4_ol_stateid *stp;
6198 struct file *filp = NULL;
6199 struct file_lock *file_lock = NULL;
6200 __be32 status;
6201 int err;
6202 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6203
6204 dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
6205 (long long) locku->lu_offset,
6206 (long long) locku->lu_length);
6207
6208 if (check_lock_length(locku->lu_offset, locku->lu_length))
6209 return nfserr_inval;
6210
6211 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6212 &locku->lu_stateid, NFS4_LOCK_STID,
6213 &stp, nn);
6214 if (status)
6215 goto out;
6216 filp = find_any_file(stp->st_stid.sc_file);
6217 if (!filp) {
6218 status = nfserr_lock_range;
6219 goto put_stateid;
6220 }
6221 file_lock = locks_alloc_lock();
6222 if (!file_lock) {
6223 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6224 status = nfserr_jukebox;
6225 goto fput;
6226 }
6227
6228 file_lock->fl_type = F_UNLCK;
6229 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6230 file_lock->fl_pid = current->tgid;
6231 file_lock->fl_file = filp;
6232 file_lock->fl_flags = FL_POSIX;
6233 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6234 file_lock->fl_start = locku->lu_offset;
6235
6236 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6237 locku->lu_length);
6238 nfs4_transform_lock_offset(file_lock);
6239
6240 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6241 if (err) {
6242 dprintk("NFSD: nfsd4_locku: vfs_lock_file failed!\n");
6243 goto out_nfserr;
6244 }
6245 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6246 fput:
6247 fput(filp);
6248 put_stateid:
6249 mutex_unlock(&stp->st_mutex);
6250 nfs4_put_stid(&stp->st_stid);
6251 out:
6252 nfsd4_bump_seqid(cstate, status);
6253 if (file_lock)
6254 locks_free_lock(file_lock);
6255 return status;
6256
6257 out_nfserr:
6258 status = nfserrno(err);
6259 goto fput;
6260 }
6261
6262 /*
6263 * returns
6264 * true: locks held by lockowner
6265 * false: no locks held by lockowner
6266 */
6267 static bool
6268 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6269 {
6270 struct file_lock *fl;
6271 int status = false;
6272 struct file *filp = find_any_file(fp);
6273 struct inode *inode;
6274 struct file_lock_context *flctx;
6275
6276 if (!filp) {
6277 /* Any valid lock stateid should have some sort of access */
6278 WARN_ON_ONCE(1);
6279 return status;
6280 }
6281
6282 inode = file_inode(filp);
6283 flctx = inode->i_flctx;
6284
6285 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6286 spin_lock(&flctx->flc_lock);
6287 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6288 if (fl->fl_owner == (fl_owner_t)lowner) {
6289 status = true;
6290 break;
6291 }
6292 }
6293 spin_unlock(&flctx->flc_lock);
6294 }
6295 fput(filp);
6296 return status;
6297 }
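/*
 * Editor's note: list_empty_careful() is a lockless pre-check; the
 * flc_posix list is only actually walked under flc_lock. The owner
 * comparison works because nfsd stores the nfs4_lockowner pointer itself
 * in fl_owner when it sets a lock (see nfsd4_lock() above).
 */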
6298
6299 __be32
6300 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6301 struct nfsd4_compound_state *cstate,
6302 union nfsd4_op_u *u)
6303 {
6304 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6305 clientid_t *clid = &rlockowner->rl_clientid;
6306 struct nfs4_stateowner *sop;
6307 struct nfs4_lockowner *lo = NULL;
6308 struct nfs4_ol_stateid *stp;
6309 struct xdr_netobj *owner = &rlockowner->rl_owner;
6310 unsigned int hashval = ownerstr_hashval(owner);
6311 __be32 status;
6312 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6313 struct nfs4_client *clp;
6314 LIST_HEAD(reaplist);
6315
6316 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6317 clid->cl_boot, clid->cl_id);
6318
6319 status = lookup_clientid(clid, cstate, nn);
6320 if (status)
6321 return status;
6322
6323 clp = cstate->clp;
6324 /* Find the matching lock stateowner */
6325 spin_lock(&clp->cl_lock);
6326 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6327 so_strhash) {
6328
6329 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6330 continue;
6331
6332 /* see if there are still any locks associated with it */
6333 lo = lockowner(sop);
6334 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6335 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6336 status = nfserr_locks_held;
6337 spin_unlock(&clp->cl_lock);
6338 return status;
6339 }
6340 }
6341
6342 nfs4_get_stateowner(sop);
6343 break;
6344 }
6345 if (!lo) {
6346 spin_unlock(&clp->cl_lock);
6347 return status;
6348 }
6349
6350 unhash_lockowner_locked(lo);
6351 while (!list_empty(&lo->lo_owner.so_stateids)) {
6352 stp = list_first_entry(&lo->lo_owner.so_stateids,
6353 struct nfs4_ol_stateid,
6354 st_perstateowner);
6355 WARN_ON(!unhash_lock_stateid(stp));
6356 put_ol_stateid_locked(stp, &reaplist);
6357 }
6358 spin_unlock(&clp->cl_lock);
6359 free_ol_stateid_reaplist(&reaplist);
6360 nfs4_put_stateowner(&lo->lo_owner);
6361
6362 return status;
6363 }
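/*
 * Editor's note: RELEASE_LOCKOWNER must fail with nfserr_locks_held if
 * any lock stateid belonging to the owner still covers an active lock;
 * only when every stateid is lock-free is the owner unhashed and its
 * stateids torn down. The reaplist defers the actual frees until after
 * cl_lock is dropped, since freeing may sleep.
 */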
6364
6365 static inline struct nfs4_client_reclaim *
6366 alloc_reclaim(void)
6367 {
6368 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6369 }
6370
6371 bool
6372 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6373 {
6374 struct nfs4_client_reclaim *crp;
6375
6376 crp = nfsd4_find_reclaim_client(name, nn);
6377 return (crp && crp->cr_clp);
6378 }
6379
6380 /*
6381 * failure => all reclaim bets are off; the client will get nfserr_no_grace
6382 */
6383 struct nfs4_client_reclaim *
6384 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6385 {
6386 unsigned int strhashval;
6387 struct nfs4_client_reclaim *crp;
6388
6389 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6390 crp = alloc_reclaim();
6391 if (crp) {
6392 strhashval = clientstr_hashval(name);
6393 INIT_LIST_HEAD(&crp->cr_strhash);
6394 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6395 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6396 crp->cr_clp = NULL;
6397 nn->reclaim_str_hashtbl_size++;
6398 }
6399 return crp;
6400 }
6401
6402 void
6403 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6404 {
6405 list_del(&crp->cr_strhash);
6406 kfree(crp);
6407 nn->reclaim_str_hashtbl_size--;
6408 }
6409
6410 void
6411 nfs4_release_reclaim(struct nfsd_net *nn)
6412 {
6413 struct nfs4_client_reclaim *crp = NULL;
6414 int i;
6415
6416 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6417 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6418 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6419 struct nfs4_client_reclaim, cr_strhash);
6420 nfs4_remove_reclaim_record(crp, nn);
6421 }
6422 }
6423 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6424 }
6425
6426 /* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6428 struct nfs4_client_reclaim *
6429 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6430 {
6431 unsigned int strhashval;
6432 struct nfs4_client_reclaim *crp = NULL;
6433
6434 dprintk("NFSD: nfsd4_find_reclaim_client for recdir %s\n", recdir);
6435
6436 strhashval = clientstr_hashval(recdir);
6437 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6438 if (same_name(crp->cr_recdir, recdir)) {
6439 return crp;
6440 }
6441 }
6442 return NULL;
6443 }
6444
6445 /*
6446 * Called from OPEN. Look for clientid in reclaim list.
6447 */
6448 __be32
6449 nfs4_check_open_reclaim(clientid_t *clid,
6450 struct nfsd4_compound_state *cstate,
6451 struct nfsd_net *nn)
6452 {
6453 __be32 status;
6454
6455 /* find clientid in conf_id_hashtbl */
6456 status = lookup_clientid(clid, cstate, nn);
6457 if (status)
6458 return nfserr_reclaim_bad;
6459
6460 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6461 return nfserr_no_grace;
6462
6463 if (nfsd4_client_record_check(cstate->clp))
6464 return nfserr_reclaim_bad;
6465
6466 return nfs_ok;
6467 }
6468
6469 #ifdef CONFIG_NFSD_FAULT_INJECTION
6470 static inline void
6471 put_client(struct nfs4_client *clp)
6472 {
6473 atomic_dec(&clp->cl_refcount);
6474 }
6475
6476 static struct nfs4_client *
6477 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6478 {
6479 struct nfs4_client *clp;
6480 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6481 nfsd_net_id);
6482
6483 if (!nfsd_netns_ready(nn))
6484 return NULL;
6485
6486 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6487 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6488 return clp;
6489 }
6490 return NULL;
6491 }
6492
6493 u64
6494 nfsd_inject_print_clients(void)
6495 {
6496 struct nfs4_client *clp;
6497 u64 count = 0;
6498 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6499 nfsd_net_id);
6500 char buf[INET6_ADDRSTRLEN];
6501
6502 if (!nfsd_netns_ready(nn))
6503 return 0;
6504
6505 spin_lock(&nn->client_lock);
6506 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6507 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6508 pr_info("NFS Client: %s\n", buf);
6509 ++count;
6510 }
6511 spin_unlock(&nn->client_lock);
6512
6513 return count;
6514 }
6515
6516 u64
6517 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6518 {
6519 u64 count = 0;
6520 struct nfs4_client *clp;
6521 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6522 nfsd_net_id);
6523
6524 if (!nfsd_netns_ready(nn))
6525 return count;
6526
6527 spin_lock(&nn->client_lock);
6528 clp = nfsd_find_client(addr, addr_size);
6529 if (clp) {
6530 if (mark_client_expired_locked(clp) == nfs_ok)
6531 ++count;
6532 else
6533 clp = NULL;
6534 }
6535 spin_unlock(&nn->client_lock);
6536
6537 if (clp)
6538 expire_client(clp);
6539
6540 return count;
6541 }
6542
6543 u64
6544 nfsd_inject_forget_clients(u64 max)
6545 {
6546 u64 count = 0;
6547 struct nfs4_client *clp, *next;
6548 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6549 nfsd_net_id);
6550 LIST_HEAD(reaplist);
6551
6552 if (!nfsd_netns_ready(nn))
6553 return count;
6554
6555 spin_lock(&nn->client_lock);
6556 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6557 if (mark_client_expired_locked(clp) == nfs_ok) {
6558 list_add(&clp->cl_lru, &reaplist);
6559 if (max != 0 && ++count >= max)
6560 break;
6561 }
6562 }
6563 spin_unlock(&nn->client_lock);
6564
6565 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6566 expire_client(clp);
6567
6568 return count;
6569 }
6570
6571 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6572 const char *type)
6573 {
6574 char buf[INET6_ADDRSTRLEN];
6575 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6576 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6577 }
6578
6579 static void
6580 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6581 struct list_head *collect)
6582 {
6583 struct nfs4_client *clp = lst->st_stid.sc_client;
6584 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6585 nfsd_net_id);
6586
6587 if (!collect)
6588 return;
6589
6590 lockdep_assert_held(&nn->client_lock);
6591 atomic_inc(&clp->cl_refcount);
6592 list_add(&lst->st_locks, collect);
6593 }
6594
6595 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6596 struct list_head *collect,
6597 bool (*func)(struct nfs4_ol_stateid *))
6598 {
6599 struct nfs4_openowner *oop;
6600 struct nfs4_ol_stateid *stp, *st_next;
6601 struct nfs4_ol_stateid *lst, *lst_next;
6602 u64 count = 0;
6603
6604 spin_lock(&clp->cl_lock);
6605 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6606 list_for_each_entry_safe(stp, st_next,
6607 &oop->oo_owner.so_stateids, st_perstateowner) {
6608 list_for_each_entry_safe(lst, lst_next,
6609 &stp->st_locks, st_locks) {
6610 if (func) {
6611 if (func(lst))
6612 nfsd_inject_add_lock_to_list(lst,
6613 collect);
6614 }
6615 ++count;
6616 /*
6617 * Despite the fact that these functions deal
6618 * with 64-bit integers for "count", we must
6619 * ensure that it doesn't blow up the
6620 * clp->cl_refcount. Throw a warning if we
6621 * start to approach INT_MAX here.
6622 */
6623 WARN_ON_ONCE(count == (INT_MAX / 2));
6624 if (count == max)
6625 goto out;
6626 }
6627 }
6628 }
6629 out:
6630 spin_unlock(&clp->cl_lock);
6631
6632 return count;
6633 }
6634
6635 static u64
6636 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6637 u64 max)
6638 {
6639 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6640 }
6641
6642 static u64
6643 nfsd_print_client_locks(struct nfs4_client *clp)
6644 {
6645 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6646 nfsd_print_count(clp, count, "locked files");
6647 return count;
6648 }
6649
6650 u64
6651 nfsd_inject_print_locks(void)
6652 {
6653 struct nfs4_client *clp;
6654 u64 count = 0;
6655 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6656 nfsd_net_id);
6657
6658 if (!nfsd_netns_ready(nn))
6659 return 0;
6660
6661 spin_lock(&nn->client_lock);
6662 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6663 count += nfsd_print_client_locks(clp);
6664 spin_unlock(&nn->client_lock);
6665
6666 return count;
6667 }
6668
6669 static void
6670 nfsd_reap_locks(struct list_head *reaplist)
6671 {
6672 struct nfs4_client *clp;
6673 struct nfs4_ol_stateid *stp, *next;
6674
6675 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6676 list_del_init(&stp->st_locks);
6677 clp = stp->st_stid.sc_client;
6678 nfs4_put_stid(&stp->st_stid);
6679 put_client(clp);
6680 }
6681 }
6682
6683 u64
6684 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6685 {
6686 unsigned int count = 0;
6687 struct nfs4_client *clp;
6688 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6689 nfsd_net_id);
6690 LIST_HEAD(reaplist);
6691
6692 if (!nfsd_netns_ready(nn))
6693 return count;
6694
6695 spin_lock(&nn->client_lock);
6696 clp = nfsd_find_client(addr, addr_size);
6697 if (clp)
6698 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6699 spin_unlock(&nn->client_lock);
6700 nfsd_reap_locks(&reaplist);
6701 return count;
6702 }
6703
6704 u64
6705 nfsd_inject_forget_locks(u64 max)
6706 {
6707 u64 count = 0;
6708 struct nfs4_client *clp;
6709 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6710 nfsd_net_id);
6711 LIST_HEAD(reaplist);
6712
6713 if (!nfsd_netns_ready(nn))
6714 return count;
6715
6716 spin_lock(&nn->client_lock);
6717 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6718 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6719 if (max != 0 && count >= max)
6720 break;
6721 }
6722 spin_unlock(&nn->client_lock);
6723 nfsd_reap_locks(&reaplist);
6724 return count;
6725 }
6726
6727 static u64
6728 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6729 struct list_head *collect,
6730 void (*func)(struct nfs4_openowner *))
6731 {
6732 struct nfs4_openowner *oop, *next;
6733 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6734 nfsd_net_id);
6735 u64 count = 0;
6736
6737 lockdep_assert_held(&nn->client_lock);
6738
6739 spin_lock(&clp->cl_lock);
6740 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6741 if (func) {
6742 func(oop);
6743 if (collect) {
6744 atomic_inc(&clp->cl_refcount);
6745 list_add(&oop->oo_perclient, collect);
6746 }
6747 }
6748 ++count;
6749 /*
6750 * Despite the fact that these functions deal with
6751 * 64-bit integers for "count", we must ensure that
6752 * it doesn't blow up the clp->cl_refcount. Throw a
6753 * warning if we start to approach INT_MAX here.
6754 */
6755 WARN_ON_ONCE(count == (INT_MAX / 2));
6756 if (count == max)
6757 break;
6758 }
6759 spin_unlock(&clp->cl_lock);
6760
6761 return count;
6762 }
6763
6764 static u64
6765 nfsd_print_client_openowners(struct nfs4_client *clp)
6766 {
6767 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6768
6769 nfsd_print_count(clp, count, "openowners");
6770 return count;
6771 }
6772
6773 static u64
6774 nfsd_collect_client_openowners(struct nfs4_client *clp,
6775 struct list_head *collect, u64 max)
6776 {
6777 return nfsd_foreach_client_openowner(clp, max, collect,
6778 unhash_openowner_locked);
6779 }
6780
6781 u64
6782 nfsd_inject_print_openowners(void)
6783 {
6784 struct nfs4_client *clp;
6785 u64 count = 0;
6786 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6787 nfsd_net_id);
6788
6789 if (!nfsd_netns_ready(nn))
6790 return 0;
6791
6792 spin_lock(&nn->client_lock);
6793 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6794 count += nfsd_print_client_openowners(clp);
6795 spin_unlock(&nn->client_lock);
6796
6797 return count;
6798 }
6799
6800 static void
6801 nfsd_reap_openowners(struct list_head *reaplist)
6802 {
6803 struct nfs4_client *clp;
6804 struct nfs4_openowner *oop, *next;
6805
6806 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6807 list_del_init(&oop->oo_perclient);
6808 clp = oop->oo_owner.so_client;
6809 release_openowner(oop);
6810 put_client(clp);
6811 }
6812 }
6813
6814 u64
6815 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6816 size_t addr_size)
6817 {
6818 unsigned int count = 0;
6819 struct nfs4_client *clp;
6820 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6821 nfsd_net_id);
6822 LIST_HEAD(reaplist);
6823
6824 if (!nfsd_netns_ready(nn))
6825 return count;
6826
6827 spin_lock(&nn->client_lock);
6828 clp = nfsd_find_client(addr, addr_size);
6829 if (clp)
6830 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6831 spin_unlock(&nn->client_lock);
6832 nfsd_reap_openowners(&reaplist);
6833 return count;
6834 }
6835
6836 u64
6837 nfsd_inject_forget_openowners(u64 max)
6838 {
6839 u64 count = 0;
6840 struct nfs4_client *clp;
6841 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6842 nfsd_net_id);
6843 LIST_HEAD(reaplist);
6844
6845 if (!nfsd_netns_ready(nn))
6846 return count;
6847
6848 spin_lock(&nn->client_lock);
6849 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6850 count += nfsd_collect_client_openowners(clp, &reaplist,
6851 max - count);
6852 if (max != 0 && count >= max)
6853 break;
6854 }
6855 spin_unlock(&nn->client_lock);
6856 nfsd_reap_openowners(&reaplist);
6857 return count;
6858 }
6859
6860 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6861 struct list_head *victims)
6862 {
6863 struct nfs4_delegation *dp, *next;
6864 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6865 nfsd_net_id);
6866 u64 count = 0;
6867
6868 lockdep_assert_held(&nn->client_lock);
6869
6870 spin_lock(&state_lock);
6871 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6872 if (victims) {
6873 /*
6874 * It's not safe to mess with delegations that have a
6875 * non-zero dl_time. They might have already been broken
6876 * and could be processed by the laundromat outside of
6877 * the state_lock. Just leave them be.
6878 */
6879 if (dp->dl_time != 0)
6880 continue;
6881
6882 atomic_inc(&clp->cl_refcount);
6883 WARN_ON(!unhash_delegation_locked(dp));
6884 list_add(&dp->dl_recall_lru, victims);
6885 }
6886 ++count;
6887 /*
6888 * Despite the fact that these functions deal with
6889 * 64-bit integers for "count", we must ensure that
6890 * it doesn't blow up the clp->cl_refcount. Throw a
6891 * warning if we start to approach INT_MAX here.
6892 */
6893 WARN_ON_ONCE(count == (INT_MAX / 2));
6894 if (count == max)
6895 break;
6896 }
6897 spin_unlock(&state_lock);
6898 return count;
6899 }
6900
6901 static u64
6902 nfsd_print_client_delegations(struct nfs4_client *clp)
6903 {
6904 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6905
6906 nfsd_print_count(clp, count, "delegations");
6907 return count;
6908 }
6909
6910 u64
6911 nfsd_inject_print_delegations(void)
6912 {
6913 struct nfs4_client *clp;
6914 u64 count = 0;
6915 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6916 nfsd_net_id);
6917
6918 if (!nfsd_netns_ready(nn))
6919 return 0;
6920
6921 spin_lock(&nn->client_lock);
6922 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6923 count += nfsd_print_client_delegations(clp);
6924 spin_unlock(&nn->client_lock);
6925
6926 return count;
6927 }
6928
6929 static void
6930 nfsd_forget_delegations(struct list_head *reaplist)
6931 {
6932 struct nfs4_client *clp;
6933 struct nfs4_delegation *dp, *next;
6934
6935 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6936 list_del_init(&dp->dl_recall_lru);
6937 clp = dp->dl_stid.sc_client;
6938 revoke_delegation(dp);
6939 put_client(clp);
6940 }
6941 }
6942
6943 u64
6944 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6945 size_t addr_size)
6946 {
6947 u64 count = 0;
6948 struct nfs4_client *clp;
6949 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6950 nfsd_net_id);
6951 LIST_HEAD(reaplist);
6952
6953 if (!nfsd_netns_ready(nn))
6954 return count;
6955
6956 spin_lock(&nn->client_lock);
6957 clp = nfsd_find_client(addr, addr_size);
6958 if (clp)
6959 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6960 spin_unlock(&nn->client_lock);
6961
6962 nfsd_forget_delegations(&reaplist);
6963 return count;
6964 }
6965
6966 u64
6967 nfsd_inject_forget_delegations(u64 max)
6968 {
6969 u64 count = 0;
6970 struct nfs4_client *clp;
6971 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6972 nfsd_net_id);
6973 LIST_HEAD(reaplist);
6974
6975 if (!nfsd_netns_ready(nn))
6976 return count;
6977
6978 spin_lock(&nn->client_lock);
6979 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6980 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6981 if (max != 0 && count >= max)
6982 break;
6983 }
6984 spin_unlock(&nn->client_lock);
6985 nfsd_forget_delegations(&reaplist);
6986 return count;
6987 }
6988
6989 static void
6990 nfsd_recall_delegations(struct list_head *reaplist)
6991 {
6992 struct nfs4_client *clp;
6993 struct nfs4_delegation *dp, *next;
6994
6995 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6996 list_del_init(&dp->dl_recall_lru);
6997 clp = dp->dl_stid.sc_client;
6998 /*
6999 * We skipped all entries that had a zero dl_time before,
7000 * so we can now reset the dl_time back to 0. If a delegation
7001 * break comes in now, then it won't make any difference since
7002 * we're recalling it either way.
7003 */
7004 spin_lock(&state_lock);
7005 dp->dl_time = 0;
7006 spin_unlock(&state_lock);
7007 nfsd_break_one_deleg(dp);
7008 put_client(clp);
7009 }
7010 }
7011
7012 u64
7013 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7014 size_t addr_size)
7015 {
7016 u64 count = 0;
7017 struct nfs4_client *clp;
7018 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7019 nfsd_net_id);
7020 LIST_HEAD(reaplist);
7021
7022 if (!nfsd_netns_ready(nn))
7023 return count;
7024
7025 spin_lock(&nn->client_lock);
7026 clp = nfsd_find_client(addr, addr_size);
7027 if (clp)
7028 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7029 spin_unlock(&nn->client_lock);
7030
7031 nfsd_recall_delegations(&reaplist);
7032 return count;
7033 }
7034
7035 u64
7036 nfsd_inject_recall_delegations(u64 max)
7037 {
7038 u64 count = 0;
7039 struct nfs4_client *clp, *next;
7040 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7041 nfsd_net_id);
7042 LIST_HEAD(reaplist);
7043
7044 if (!nfsd_netns_ready(nn))
7045 return count;
7046
7047 spin_lock(&nn->client_lock);
7048 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7049 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7050 if (max != 0 && count >= max)
7051 break;
7052 }
7053 spin_unlock(&nn->client_lock);
7054 nfsd_recall_delegations(&reaplist);
7055 return count;
7056 }
7057 #endif /* CONFIG_NFSD_FAULT_INJECTION */
7058
7059 /*
7060 * Since the lifetime of a delegation isn't limited to that of an open, a
7061 * client may quite reasonably hang on to a delegation as long as it has
7062 * the inode cached. This becomes an obvious problem the first time a
7063 * client's inode cache approaches the size of the server's total memory.
7064 *
7065 * For now we avoid this problem by imposing a hard limit on the number
7066 * of delegations, which varies according to the server's memory size.
7067 */
7068 static void
7069 set_max_delegations(void)
7070 {
7071 /*
7072 * Allow at most 4 delegations per megabyte of RAM. Quick
7073 * estimates suggest that in the worst case (where every delegation
7074 * is for a different inode), a delegation could take about 1.5K,
7075 * giving a worst case usage of about 6% of memory.
7076 */
7077 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7078 }
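/*
 * Worked example (editor's addition): with 4K pages (PAGE_SHIFT == 12)
 * the shift is 20 - 2 - 12 = 6, i.e. one delegation per 64 pages. A
 * megabyte is 256 such pages, so 256 >> 6 = 4 delegations per MB of
 * low memory, matching the "at most 4 delegations per megabyte"
 * estimate above.
 */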
7079
7080 static int nfs4_state_create_net(struct net *net)
7081 {
7082 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7083 int i;
7084
7085 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7086 CLIENT_HASH_SIZE, GFP_KERNEL);
7087 if (!nn->conf_id_hashtbl)
7088 goto err;
7089 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7090 CLIENT_HASH_SIZE, GFP_KERNEL);
7091 if (!nn->unconf_id_hashtbl)
7092 goto err_unconf_id;
7093 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
7094 SESSION_HASH_SIZE, GFP_KERNEL);
7095 if (!nn->sessionid_hashtbl)
7096 goto err_sessionid;
7097
7098 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7099 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7100 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7101 }
7102 for (i = 0; i < SESSION_HASH_SIZE; i++)
7103 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7104 nn->conf_name_tree = RB_ROOT;
7105 nn->unconf_name_tree = RB_ROOT;
7106 nn->boot_time = get_seconds();
7107 nn->grace_ended = false;
7108 nn->nfsd4_manager.block_opens = true;
7109 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7110 INIT_LIST_HEAD(&nn->client_lru);
7111 INIT_LIST_HEAD(&nn->close_lru);
7112 INIT_LIST_HEAD(&nn->del_recall_lru);
7113 spin_lock_init(&nn->client_lock);
7114
7115 spin_lock_init(&nn->blocked_locks_lock);
7116 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7117
7118 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7119 get_net(net);
7120
7121 return 0;
7122
7123 err_sessionid:
7124 kfree(nn->unconf_id_hashtbl);
7125 err_unconf_id:
7126 kfree(nn->conf_id_hashtbl);
7127 err:
7128 return -ENOMEM;
7129 }
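/*
 * Editor's note: the unwinding above is the standard kernel goto ladder:
 * each allocation gets a label just below its own failure point, so a
 * failure at step N frees exactly the allocations from steps 1..N-1, in
 * reverse order.
 */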
7130
7131 static void
7132 nfs4_state_destroy_net(struct net *net)
7133 {
7134 int i;
7135 struct nfs4_client *clp = NULL;
7136 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7137
7138 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7139 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7140 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7141 destroy_client(clp);
7142 }
7143 }
7144
7145 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7146 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7147 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7148 destroy_client(clp);
7149 }
7150 }
7151
7152 kfree(nn->sessionid_hashtbl);
7153 kfree(nn->unconf_id_hashtbl);
7154 kfree(nn->conf_id_hashtbl);
7155 put_net(net);
7156 }
7157
7158 int
7159 nfs4_state_start_net(struct net *net)
7160 {
7161 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7162 int ret;
7163
7164 ret = nfs4_state_create_net(net);
7165 if (ret)
7166 return ret;
7167 locks_start_grace(net, &nn->nfsd4_manager);
7168 nfsd4_client_tracking_init(net);
7169 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
7170 nn->nfsd4_grace, net->ns.inum);
7171 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7172 return 0;
7173 }
7174
7175 /* initialization to perform when the nfsd service is started: */
7176
7177 int
7178 nfs4_state_start(void)
7179 {
7180 int ret;
7181
7182 ret = set_callback_cred();
7183 if (ret)
7184 return ret;
7185
7186 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7187 if (laundry_wq == NULL) {
7188 ret = -ENOMEM;
7189 goto out_cleanup_cred;
7190 }
7191 ret = nfsd4_create_callback_queue();
7192 if (ret)
7193 goto out_free_laundry;
7194
7195 set_max_delegations();
7196 return 0;
7197
7198 out_free_laundry:
7199 destroy_workqueue(laundry_wq);
7200 out_cleanup_cred:
7201 cleanup_callback_cred();
7202 return ret;
7203 }
7204
7205 void
7206 nfs4_state_shutdown_net(struct net *net)
7207 {
7208 struct nfs4_delegation *dp = NULL;
7209 struct list_head *pos, *next, reaplist;
7210 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7211 struct nfsd4_blocked_lock *nbl;
7212
7213 cancel_delayed_work_sync(&nn->laundromat_work);
7214 locks_end_grace(&nn->nfsd4_manager);
7215
7216 INIT_LIST_HEAD(&reaplist);
7217 spin_lock(&state_lock);
7218 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7219 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7220 WARN_ON(!unhash_delegation_locked(dp));
7221 list_add(&dp->dl_recall_lru, &reaplist);
7222 }
7223 spin_unlock(&state_lock);
7224 list_for_each_safe(pos, next, &reaplist) {
7225 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
7226 list_del_init(&dp->dl_recall_lru);
7227 put_clnt_odstate(dp->dl_clnt_odstate);
7228 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
7229 nfs4_put_stid(&dp->dl_stid);
7230 }
7231
7232 BUG_ON(!list_empty(&reaplist));
7233 spin_lock(&nn->blocked_locks_lock);
7234 while (!list_empty(&nn->blocked_locks_lru)) {
7235 nbl = list_first_entry(&nn->blocked_locks_lru,
7236 struct nfsd4_blocked_lock, nbl_lru);
7237 list_move(&nbl->nbl_lru, &reaplist);
7238 list_del_init(&nbl->nbl_list);
7239 }
7240 spin_unlock(&nn->blocked_locks_lock);
7241
7242 while (!list_empty(&reaplist)) {
7243 nbl = list_first_entry(&reaplist,
7244 struct nfsd4_blocked_lock, nbl_lru);
7245 list_del_init(&nbl->nbl_lru);
7246 posix_unblock_lock(&nbl->nbl_lock);
7247 free_blocked_lock(nbl);
7248 }
7249
7250 nfsd4_client_tracking_exit(net);
7251 nfs4_state_destroy_net(net);
7252 }
7253
7254 void
7255 nfs4_state_shutdown(void)
7256 {
7257 destroy_workqueue(laundry_wq);
7258 nfsd4_destroy_callback_queue();
7259 cleanup_callback_cred();
7260 }
7261
7262 static void
7263 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7264 {
7265 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7266 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7267 }
7268
7269 static void
7270 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7271 {
7272 if (cstate->minorversion) {
7273 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7274 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7275 }
7276 }
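/*
 * Editor's note: put_stateid() records the stateid an operation just
 * produced as the compound's "current stateid" (minorversion >= 1 only),
 * and get_stateid() substitutes that saved value whenever a later op in
 * the same compound passes the special current-stateid marker. The
 * nfsd4_set_* / nfsd4_get_* wrappers below just route each operation's
 * stateid field through this pair.
 */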
7277
7278 void
7279 clear_current_stateid(struct nfsd4_compound_state *cstate)
7280 {
7281 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7282 }
7283
7284 /*
7285 * functions to set current state id
7286 */
7287 void
7288 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7289 union nfsd4_op_u *u)
7290 {
7291 put_stateid(cstate, &u->open_downgrade.od_stateid);
7292 }
7293
7294 void
7295 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7296 union nfsd4_op_u *u)
7297 {
7298 put_stateid(cstate, &u->open.op_stateid);
7299 }
7300
7301 void
7302 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7303 union nfsd4_op_u *u)
7304 {
7305 put_stateid(cstate, &u->close.cl_stateid);
7306 }
7307
7308 void
7309 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7310 union nfsd4_op_u *u)
7311 {
7312 put_stateid(cstate, &u->lock.lk_resp_stateid);
7313 }
7314
7315 /*
7316 * functions to consume current state id
7317 */
7318
7319 void
7320 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7321 union nfsd4_op_u *u)
7322 {
7323 get_stateid(cstate, &u->open_downgrade.od_stateid);
7324 }
7325
7326 void
7327 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7328 union nfsd4_op_u *u)
7329 {
7330 get_stateid(cstate, &u->delegreturn.dr_stateid);
7331 }
7332
7333 void
7334 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7335 union nfsd4_op_u *u)
7336 {
7337 get_stateid(cstate, &u->free_stateid.fr_stateid);
7338 }
7339
7340 void
7341 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7342 union nfsd4_op_u *u)
7343 {
7344 get_stateid(cstate, &u->setattr.sa_stateid);
7345 }
7346
7347 void
7348 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7349 union nfsd4_op_u *u)
7350 {
7351 get_stateid(cstate, &u->close.cl_stateid);
7352 }
7353
7354 void
7355 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7356 union nfsd4_op_u *u)
7357 {
7358 get_stateid(cstate, &u->locku.lu_stateid);
7359 }
7360
7361 void
7362 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7363 union nfsd4_op_u *u)
7364 {
7365 get_stateid(cstate, &u->read.rd_stateid);
7366 }
7367
7368 void
7369 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7370 union nfsd4_op_u *u)
7371 {
7372 get_stateid(cstate, &u->write.wr_stateid);
7373 }