/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include "current_stateid.h"
#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
};
static const stateid_t currentstateid = {
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
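/*
 * Session and client reference-counting helpers follow.  A session pins
 * its client (nfsd4_get_session_locked() takes a client reference), and
 * a client is only torn down once its refcount drops and it has been
 * marked expired (cl_time == 0).
 */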
static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}
/* must be called under the client_lock */
static void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}
static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
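/*
 * Support for CB_NOTIFY_LOCK: a lock request that could not be granted
 * immediately is tracked as an nfsd4_blocked_lock on the lockowner's
 * lo_blocked list, so the client can be notified when the conflicting
 * lock goes away.
 */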
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		posix_unblock_lock(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_release_private(&nbl->nbl_lock);
	kfree(nbl);
}
static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};
static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}
static unsigned int
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;
	unsigned int x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	might_lock(&state_lock);

	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del_rcu(&fi->fi_hash);
		spin_unlock(&state_lock);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}
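/*
 * Helpers for looking up an open struct file on an nfs4_file.  fi_fds[]
 * is indexed by open mode (O_RDONLY, O_WRONLY, O_RDWR); callers prefer
 * an exact-mode descriptor and fall back to the O_RDWR one.
 */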
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);
	return ret;
}

struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
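/*
 * Per-file open access counting: fi_access[O_RDONLY] and
 * fi_access[O_WRONLY] count how many opens currently need read or write
 * access, and fi_share_deny caches the union of deny modes in force.
 */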
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appear in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}
static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
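/*
 * Example (hypothetical filehandle): if jhash() returns 0x00aabbcc, the
 * three bits set/tested in a filter are positions 0xcc, 0xbb and 0xaa,
 * i.e. each of the low three bytes indexes one bit of the 256-bit map.
 */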
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&current_fh->fh_handle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	struct file *filp = NULL;

	spin_lock(&fp->fi_lock);
	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
		swap(filp, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (filp) {
		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
		fput(filp);
	}
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}
/**
 * nfs4_get_existing_delegation - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 *      On success: NULL if an existing delegation was not found.
 *
 *      On error: -EAGAIN if one was previously granted to this nfs4_client
 *                 for this nfs4_file.
 */
static int
nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp)
			return -EAGAIN;
	}
	return 0;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	int status;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	status = nfs4_get_existing_delegation(clp, fp);
	if (status)
		return status;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (list_empty(&dp->dl_perfile))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_deleg_lease(dp->dl_stid.sc_file);

	if (clp->cl_minorversion == 0)
		nfs4_put_stid(&dp->dl_stid);
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
}
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempt to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
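/*
 * Example: NFS4_SHARE_ACCESS_READ == 1, _WRITE == 2, _BOTH == 3, so an
 * open for "both" sets bit 3 of st_access_bmap, and bmap_to_share_mode()
 * below ORs the set bit indices back together into a share mode.
 */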
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed:
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}
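/*
 * Note: the actual freeing is done after cl_lock is dropped (callers
 * drain the reaplist once they unlock), since sc_free callbacks such as
 * nfs4_free_lock_stateid() may close files and sleep.
 */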
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
}
/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
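/* Worked out: 24 + 12 + 44 = 80 bytes of reply header that are never cached. */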
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	/*
	 * Never use more than a third of the remaining memory,
	 * unless it's the only way to give this client a slot:
	 */
	avail = clamp_t(int, avail, slotsize, avail/3);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}
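/*
 * DRC accounting must stay balanced: free_session() calls
 * nfsd4_put_drc_mem() to release the same slotsize * maxreqs charge that
 * was taken when the session was created.
 */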
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
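/*
 * Each slot is allocated as one kzalloc() of slot_bytes(), i.e. the
 * struct nfsd4_slot header plus enough room to cache the portion of a
 * reply that follows the SEQUENCE op.
 */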
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN))
			return elem;
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}
static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}
static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a requests differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
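/*
 * So a clientid is <boot_time, per-net counter>, which is what
 * STALE_CLIENTID() above checks against, and the confirm verifier is
 * <current time, per-net verifier counter>.
 */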
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}
2090 static struct nfs4_client
*create_client(struct xdr_netobj name
,
2091 struct svc_rqst
*rqstp
, nfs4_verifier
*verf
)
2093 struct nfs4_client
*clp
;
2094 struct sockaddr
*sa
= svc_addr(rqstp
);
2096 struct net
*net
= SVC_NET(rqstp
);
2098 clp
= alloc_client(name
);
2102 ret
= copy_cred(&clp
->cl_cred
, &rqstp
->rq_cred
);
2107 nfsd4_init_cb(&clp
->cl_cb_null
, clp
, NULL
, NFSPROC4_CLNT_CB_NULL
);
2108 clp
->cl_time
= get_seconds();
2109 clear_bit(0, &clp
->cl_cb_slot_busy
);
2110 copy_verf(clp
, verf
);
2111 rpc_copy_addr((struct sockaddr
*) &clp
->cl_addr
, sa
);
2112 clp
->cl_cb_session
= NULL
;
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}
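/*
 * Confirmed and unconfirmed clients are kept in two rbtrees keyed by
 * compare_blob() on cl_name (the client-supplied identifier), plus two
 * id hash tables keyed by clientid, maintained by the helpers below.
 */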
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk("NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
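
/*
 * For example, a SETCLIENTID whose callback netid is "tcp6" must carry
 * an IPv6 universal address in se_callback_addr_val; rpc_uaddr2sockaddr()
 * turns that string into cb_addr, and any mismatch with the expected
 * address family simply disables the callback channel (and therefore
 * delegations) for this client rather than failing the operation.
 */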
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * cache:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}
static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}
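
/*
 * client_has_state() is what the confirmation paths use to decide
 * whether an existing confirmed client can simply be discarded: for
 * example, nfsd4_exchange_id() returns NFS4ERR_CLID_INUSE instead of
 * expiring a confirmed client that still holds opens, delegations,
 * sessions or (with pNFS) layout state.
 */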
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
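
/*
 * For example, with slot_seqid == 5 and the slot not in use: seqid 6 is
 * the expected next request (nfs_ok), seqid 5 is a retransmission to be
 * answered from the reply cache, and anything else is misordered.  The
 * "slot_seqid + 1" comparison is done in unsigned 32-bit arithmetic, so
 * a wrap from 0xffffffff to 0 is still treated as "next".
 */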
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
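
/*
 * cl_cs_slot is a one-entry reply cache dedicated to CREATE_SESSION:
 * nfsd4_create_session() runs check_slot_seqid() against its sl_seqid
 * and, when that returns nfserr_replay_cache, answers the retransmission
 * from nfsd4_replay_create_session() instead of building a new session.
 */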
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
	2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
	1 +	/* MIN tag is length with zero, only length */ \
	3 +	/* version, opcount, opcode */ \
	XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
		/* seqid, slotID, slotID, cache */ \
	4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
	2 +	/* verifier: AUTH_NULL, length 0 */\
	1 +	/* status */ \
	1 +	/* MIN tag is length with zero, only length */ \
	3 +	/* opcount, opcode, opstatus*/ \
	XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
		/* seqid, slotID, slotID, slotID, status */ \
	5 ) * sizeof(__be32))
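
/*
 * With NFS4_MAX_SESSIONID_LEN == 16, XDR_QUADLEN(16) is 4, so both
 * minima work out to 16 XDR words, i.e. 64 bytes:
 *	request: (2*2 + 1 + 3 + 4 + 4) * 4 == 64
 *	reply:   (2 + 1 + 1 + 3 + 4 + 5) * 4 == 64
 * check_forechannel_attrs() below refuses any channel whose maxreq_sz
 * or maxresp_sz cannot hold even these headers.
 */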
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}
;
2670 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2671 * These are based on similar macros in linux/sunrpc/msg_prot.h .
2673 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
2674 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2676 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2677 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2679 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2680 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2681 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2682 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2685 static __be32
check_backchannel_attrs(struct nfsd4_channel_attrs
*ca
)
2687 ca
->headerpadsz
= 0;
2689 if (ca
->maxreq_sz
< NFSD_CB_MAX_REQ_SZ
)
2690 return nfserr_toosmall
;
2691 if (ca
->maxresp_sz
< NFSD_CB_MAX_RESP_SZ
)
2692 return nfserr_toosmall
;
2693 ca
->maxresp_cached
= 0;
2695 return nfserr_toosmall
;
2700 static __be32
nfsd4_check_cb_sec(struct nfsd4_cb_sec
*cbs
)
2702 switch (cbs
->flavor
) {
2708 * GSS case: the spec doesn't allow us to return this
2709 * error. But it also doesn't allow us not to support
2711 * I'd rather this fail hard than return some error the
2712 * client might think it can already handle:
2714 return nfserr_encr_alg_unsupp
;
2719 nfsd4_create_session(struct svc_rqst
*rqstp
,
2720 struct nfsd4_compound_state
*cstate
, union nfsd4_op_u
*u
)
2722 struct nfsd4_create_session
*cr_ses
= &u
->create_session
;
2723 struct sockaddr
*sa
= svc_addr(rqstp
);
2724 struct nfs4_client
*conf
, *unconf
;
2725 struct nfs4_client
*old
= NULL
;
2726 struct nfsd4_session
*new;
2727 struct nfsd4_conn
*conn
;
2728 struct nfsd4_clid_slot
*cs_slot
= NULL
;
2730 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
2732 if (cr_ses
->flags
& ~SESSION4_FLAG_MASK_A
)
2733 return nfserr_inval
;
2734 status
= nfsd4_check_cb_sec(&cr_ses
->cb_sec
);
2737 status
= check_forechannel_attrs(&cr_ses
->fore_channel
, nn
);
2740 status
= check_backchannel_attrs(&cr_ses
->back_channel
);
2742 goto out_release_drc_mem
;
2743 status
= nfserr_jukebox
;
2744 new = alloc_session(&cr_ses
->fore_channel
, &cr_ses
->back_channel
);
2746 goto out_release_drc_mem
;
2747 conn
= alloc_conn_from_crses(rqstp
, cr_ses
);
2749 goto out_free_session
;
2751 spin_lock(&nn
->client_lock
);
2752 unconf
= find_unconfirmed_client(&cr_ses
->clientid
, true, nn
);
2753 conf
= find_confirmed_client(&cr_ses
->clientid
, true, nn
);
2754 WARN_ON_ONCE(conf
&& unconf
);
2757 status
= nfserr_wrong_cred
;
2758 if (!nfsd4_mach_creds_match(conf
, rqstp
))
2760 cs_slot
= &conf
->cl_cs_slot
;
2761 status
= check_slot_seqid(cr_ses
->seqid
, cs_slot
->sl_seqid
, 0);
2763 if (status
== nfserr_replay_cache
)
2764 status
= nfsd4_replay_create_session(cr_ses
, cs_slot
);
2767 } else if (unconf
) {
2768 if (!same_creds(&unconf
->cl_cred
, &rqstp
->rq_cred
) ||
2769 !rpc_cmp_addr(sa
, (struct sockaddr
*) &unconf
->cl_addr
)) {
2770 status
= nfserr_clid_inuse
;
2773 status
= nfserr_wrong_cred
;
2774 if (!nfsd4_mach_creds_match(unconf
, rqstp
))
2776 cs_slot
= &unconf
->cl_cs_slot
;
2777 status
= check_slot_seqid(cr_ses
->seqid
, cs_slot
->sl_seqid
, 0);
2779 /* an unconfirmed replay returns misordered */
2780 status
= nfserr_seq_misordered
;
2783 old
= find_confirmed_client_by_name(&unconf
->cl_name
, nn
);
2785 status
= mark_client_expired_locked(old
);
2791 move_to_confirmed(unconf
);
2794 status
= nfserr_stale_clientid
;
2798 /* Persistent sessions are not supported */
2799 cr_ses
->flags
&= ~SESSION4_PERSIST
;
2800 /* Upshifting from TCP to RDMA is not supported */
2801 cr_ses
->flags
&= ~SESSION4_RDMA
;
2803 init_session(rqstp
, new, conf
, cr_ses
);
2804 nfsd4_get_session_locked(new);
2806 memcpy(cr_ses
->sessionid
.data
, new->se_sessionid
.data
,
2807 NFS4_MAX_SESSIONID_LEN
);
2808 cs_slot
->sl_seqid
++;
2809 cr_ses
->seqid
= cs_slot
->sl_seqid
;
2811 /* cache solo and embedded create sessions under the client_lock */
2812 nfsd4_cache_create_session(cr_ses
, cs_slot
, status
);
2813 spin_unlock(&nn
->client_lock
);
2814 /* init connection and backchannel */
2815 nfsd4_init_conn(rqstp
, conn
, new);
2816 nfsd4_put_session(new);
2821 spin_unlock(&nn
->client_lock
);
2826 __free_session(new);
2827 out_release_drc_mem
:
2828 nfsd4_put_drc_mem(&cr_ses
->fore_channel
);
2832 static __be32
nfsd4_map_bcts_dir(u32
*dir
)
2835 case NFS4_CDFC4_FORE
:
2836 case NFS4_CDFC4_BACK
:
2838 case NFS4_CDFC4_FORE_OR_BOTH
:
2839 case NFS4_CDFC4_BACK_OR_BOTH
:
2840 *dir
= NFS4_CDFC4_BOTH
;
2843 return nfserr_inval
;
2846 __be32
nfsd4_backchannel_ctl(struct svc_rqst
*rqstp
,
2847 struct nfsd4_compound_state
*cstate
,
2848 union nfsd4_op_u
*u
)
2850 struct nfsd4_backchannel_ctl
*bc
= &u
->backchannel_ctl
;
2851 struct nfsd4_session
*session
= cstate
->session
;
2852 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
2855 status
= nfsd4_check_cb_sec(&bc
->bc_cb_sec
);
2858 spin_lock(&nn
->client_lock
);
2859 session
->se_cb_prog
= bc
->bc_cb_program
;
2860 session
->se_cb_sec
= bc
->bc_cb_sec
;
2861 spin_unlock(&nn
->client_lock
);
2863 nfsd4_probe_callback(session
->se_client
);
2868 __be32
nfsd4_bind_conn_to_session(struct svc_rqst
*rqstp
,
2869 struct nfsd4_compound_state
*cstate
,
2870 union nfsd4_op_u
*u
)
2872 struct nfsd4_bind_conn_to_session
*bcts
= &u
->bind_conn_to_session
;
2874 struct nfsd4_conn
*conn
;
2875 struct nfsd4_session
*session
;
2876 struct net
*net
= SVC_NET(rqstp
);
2877 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
2879 if (!nfsd4_last_compound_op(rqstp
))
2880 return nfserr_not_only_op
;
2881 spin_lock(&nn
->client_lock
);
2882 session
= find_in_sessionid_hashtbl(&bcts
->sessionid
, net
, &status
);
2883 spin_unlock(&nn
->client_lock
);
2885 goto out_no_session
;
2886 status
= nfserr_wrong_cred
;
2887 if (!nfsd4_mach_creds_match(session
->se_client
, rqstp
))
2889 status
= nfsd4_map_bcts_dir(&bcts
->dir
);
2892 conn
= alloc_conn(rqstp
, bcts
->dir
);
2893 status
= nfserr_jukebox
;
2896 nfsd4_init_conn(rqstp
, conn
, session
);
2899 nfsd4_put_session(session
);
2904 static bool nfsd4_compound_in_session(struct nfsd4_session
*session
, struct nfs4_sessionid
*sid
)
2908 return !memcmp(sid
, &session
->se_sessionid
, sizeof(*sid
));
2912 nfsd4_destroy_session(struct svc_rqst
*r
, struct nfsd4_compound_state
*cstate
,
2913 union nfsd4_op_u
*u
)
2915 struct nfsd4_destroy_session
*sessionid
= &u
->destroy_session
;
2916 struct nfsd4_session
*ses
;
2918 int ref_held_by_me
= 0;
2919 struct net
*net
= SVC_NET(r
);
2920 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
2922 status
= nfserr_not_only_op
;
2923 if (nfsd4_compound_in_session(cstate
->session
, &sessionid
->sessionid
)) {
2924 if (!nfsd4_last_compound_op(r
))
2928 dump_sessionid(__func__
, &sessionid
->sessionid
);
2929 spin_lock(&nn
->client_lock
);
2930 ses
= find_in_sessionid_hashtbl(&sessionid
->sessionid
, net
, &status
);
2932 goto out_client_lock
;
2933 status
= nfserr_wrong_cred
;
2934 if (!nfsd4_mach_creds_match(ses
->se_client
, r
))
2935 goto out_put_session
;
2936 status
= mark_session_dead_locked(ses
, 1 + ref_held_by_me
);
2938 goto out_put_session
;
2939 unhash_session(ses
);
2940 spin_unlock(&nn
->client_lock
);
2942 nfsd4_probe_callback_sync(ses
->se_client
);
2944 spin_lock(&nn
->client_lock
);
2947 nfsd4_put_session_locked(ses
);
2949 spin_unlock(&nn
->client_lock
);
2954 static struct nfsd4_conn
*__nfsd4_find_conn(struct svc_xprt
*xpt
, struct nfsd4_session
*s
)
2956 struct nfsd4_conn
*c
;
2958 list_for_each_entry(c
, &s
->se_conns
, cn_persession
) {
2959 if (c
->cn_xprt
== xpt
) {
2966 static __be32
nfsd4_sequence_check_conn(struct nfsd4_conn
*new, struct nfsd4_session
*ses
)
2968 struct nfs4_client
*clp
= ses
->se_client
;
2969 struct nfsd4_conn
*c
;
2970 __be32 status
= nfs_ok
;
2973 spin_lock(&clp
->cl_lock
);
2974 c
= __nfsd4_find_conn(new->cn_xprt
, ses
);
2977 status
= nfserr_conn_not_bound_to_session
;
2978 if (clp
->cl_mach_cred
)
2980 __nfsd4_hash_conn(new, ses
);
2981 spin_unlock(&clp
->cl_lock
);
2982 ret
= nfsd4_register_conn(new);
2984 /* oops; xprt is already down: */
2985 nfsd4_conn_lost(&new->cn_xpt_user
);
2988 spin_unlock(&clp
->cl_lock
);
2993 static bool nfsd4_session_too_many_ops(struct svc_rqst
*rqstp
, struct nfsd4_session
*session
)
2995 struct nfsd4_compoundargs
*args
= rqstp
->rq_argp
;
2997 return args
->opcnt
> session
->se_fchannel
.maxops
;
3000 static bool nfsd4_request_too_big(struct svc_rqst
*rqstp
,
3001 struct nfsd4_session
*session
)
3003 struct xdr_buf
*xb
= &rqstp
->rq_arg
;
3005 return xb
->len
> session
->se_fchannel
.maxreq_sz
;
3008 static bool replay_matches_cache(struct svc_rqst
*rqstp
,
3009 struct nfsd4_sequence
*seq
, struct nfsd4_slot
*slot
)
3011 struct nfsd4_compoundargs
*argp
= rqstp
->rq_argp
;
3013 if ((bool)(slot
->sl_flags
& NFSD4_SLOT_CACHETHIS
) !=
3014 (bool)seq
->cachethis
)
3017 * If there's an error than the reply can have fewer ops than
3018 * the call. But if we cached a reply with *more* ops than the
3019 * call you're sending us now, then this new call is clearly not
3020 * really a replay of the old one:
3022 if (slot
->sl_opcnt
< argp
->opcnt
)
3024 /* This is the only check explicitly called by spec: */
3025 if (!same_creds(&rqstp
->rq_cred
, &slot
->sl_cred
))
3028 * There may be more comparisons we could actually do, but the
3029 * spec doesn't require us to catch every case where the calls
3030 * don't match (that would require caching the call as well as
3031 * the reply), so we don't bother.
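 *
 * So, for instance, if the cached reply was for SEQUENCE+PUTFH+READ with
 * cachethis set, a "retry" that arrives with cachethis clear, with more
 * ops than were cached, or under a different credential is reported as a
 * false retry rather than replayed from the slot.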
3037 nfsd4_sequence(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
3038 union nfsd4_op_u
*u
)
3040 struct nfsd4_sequence
*seq
= &u
->sequence
;
3041 struct nfsd4_compoundres
*resp
= rqstp
->rq_resp
;
3042 struct xdr_stream
*xdr
= &resp
->xdr
;
3043 struct nfsd4_session
*session
;
3044 struct nfs4_client
*clp
;
3045 struct nfsd4_slot
*slot
;
3046 struct nfsd4_conn
*conn
;
3049 struct net
*net
= SVC_NET(rqstp
);
3050 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
3052 if (resp
->opcnt
!= 1)
3053 return nfserr_sequence_pos
;
3056 * Will be either used or freed by nfsd4_sequence_check_conn
3059 conn
= alloc_conn(rqstp
, NFS4_CDFC4_FORE
);
3061 return nfserr_jukebox
;
3063 spin_lock(&nn
->client_lock
);
3064 session
= find_in_sessionid_hashtbl(&seq
->sessionid
, net
, &status
);
3066 goto out_no_session
;
3067 clp
= session
->se_client
;
3069 status
= nfserr_too_many_ops
;
3070 if (nfsd4_session_too_many_ops(rqstp
, session
))
3071 goto out_put_session
;
3073 status
= nfserr_req_too_big
;
3074 if (nfsd4_request_too_big(rqstp
, session
))
3075 goto out_put_session
;
3077 status
= nfserr_badslot
;
3078 if (seq
->slotid
>= session
->se_fchannel
.maxreqs
)
3079 goto out_put_session
;
3081 slot
= session
->se_slots
[seq
->slotid
];
3082 dprintk("%s: slotid %d\n", __func__
, seq
->slotid
);
3084 /* We do not negotiate the number of slots yet, so set the
3085 * maxslots to the session maxreqs which is used to encode
3086 * sr_highest_slotid and the sr_target_slot id to maxslots */
3087 seq
->maxslots
= session
->se_fchannel
.maxreqs
;
3089 status
= check_slot_seqid(seq
->seqid
, slot
->sl_seqid
,
3090 slot
->sl_flags
& NFSD4_SLOT_INUSE
);
3091 if (status
== nfserr_replay_cache
) {
3092 status
= nfserr_seq_misordered
;
3093 if (!(slot
->sl_flags
& NFSD4_SLOT_INITIALIZED
))
3094 goto out_put_session
;
3095 status
= nfserr_seq_false_retry
;
3096 if (!replay_matches_cache(rqstp
, seq
, slot
))
3097 goto out_put_session
;
3098 cstate
->slot
= slot
;
3099 cstate
->session
= session
;
3101 /* Return the cached reply status and set cstate->status
3102 * for nfsd4_proc_compound processing */
3103 status
= nfsd4_replay_cache_entry(resp
, seq
);
3104 cstate
->status
= nfserr_replay_cache
;
3108 goto out_put_session
;
3110 status
= nfsd4_sequence_check_conn(conn
, session
);
3113 goto out_put_session
;
3115 buflen
= (seq
->cachethis
) ?
3116 session
->se_fchannel
.maxresp_cached
:
3117 session
->se_fchannel
.maxresp_sz
;
3118 status
= (seq
->cachethis
) ? nfserr_rep_too_big_to_cache
:
3120 if (xdr_restrict_buflen(xdr
, buflen
- rqstp
->rq_auth_slack
))
3121 goto out_put_session
;
3122 svc_reserve(rqstp
, buflen
);
3125 /* Success! bump slot seqid */
3126 slot
->sl_seqid
= seq
->seqid
;
3127 slot
->sl_flags
|= NFSD4_SLOT_INUSE
;
3129 slot
->sl_flags
|= NFSD4_SLOT_CACHETHIS
;
3131 slot
->sl_flags
&= ~NFSD4_SLOT_CACHETHIS
;
3133 cstate
->slot
= slot
;
3134 cstate
->session
= session
;
3138 switch (clp
->cl_cb_state
) {
3140 seq
->status_flags
= SEQ4_STATUS_CB_PATH_DOWN
;
3142 case NFSD4_CB_FAULT
:
3143 seq
->status_flags
= SEQ4_STATUS_BACKCHANNEL_FAULT
;
3146 seq
->status_flags
= 0;
3148 if (!list_empty(&clp
->cl_revoked
))
3149 seq
->status_flags
|= SEQ4_STATUS_RECALLABLE_STATE_REVOKED
;
3153 spin_unlock(&nn
->client_lock
);
3156 nfsd4_put_session_locked(session
);
3157 goto out_no_session
;
3161 nfsd4_sequence_done(struct nfsd4_compoundres
*resp
)
3163 struct nfsd4_compound_state
*cs
= &resp
->cstate
;
3165 if (nfsd4_has_session(cs
)) {
3166 if (cs
->status
!= nfserr_replay_cache
) {
3167 nfsd4_store_cache_entry(resp
);
3168 cs
->slot
->sl_flags
&= ~NFSD4_SLOT_INUSE
;
3170 /* Drop session reference that was taken in nfsd4_sequence() */
3171 nfsd4_put_session(cs
->session
);
3173 put_client_renew(cs
->clp
);
3177 nfsd4_destroy_clientid(struct svc_rqst
*rqstp
,
3178 struct nfsd4_compound_state
*cstate
,
3179 union nfsd4_op_u
*u
)
3181 struct nfsd4_destroy_clientid
*dc
= &u
->destroy_clientid
;
3182 struct nfs4_client
*conf
, *unconf
;
3183 struct nfs4_client
*clp
= NULL
;
3185 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
3187 spin_lock(&nn
->client_lock
);
3188 unconf
= find_unconfirmed_client(&dc
->clientid
, true, nn
);
3189 conf
= find_confirmed_client(&dc
->clientid
, true, nn
);
3190 WARN_ON_ONCE(conf
&& unconf
);
3193 if (client_has_state(conf
)) {
3194 status
= nfserr_clientid_busy
;
3197 status
= mark_client_expired_locked(conf
);
3204 status
= nfserr_stale_clientid
;
3207 if (!nfsd4_mach_creds_match(clp
, rqstp
)) {
3209 status
= nfserr_wrong_cred
;
3212 unhash_client_locked(clp
);
3214 spin_unlock(&nn
->client_lock
);
3221 nfsd4_reclaim_complete(struct svc_rqst
*rqstp
,
3222 struct nfsd4_compound_state
*cstate
, union nfsd4_op_u
*u
)
3224 struct nfsd4_reclaim_complete
*rc
= &u
->reclaim_complete
;
3227 if (rc
->rca_one_fs
) {
3228 if (!cstate
->current_fh
.fh_dentry
)
3229 return nfserr_nofilehandle
;
3231 * We don't take advantage of the rca_one_fs case.
3232 * That's OK, it's optional, we can safely ignore it.
3237 status
= nfserr_complete_already
;
3238 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE
,
3239 &cstate
->session
->se_client
->cl_flags
))
3242 status
= nfserr_stale_clientid
;
3243 if (is_client_expired(cstate
->session
->se_client
))
3245 * The following error isn't really legal.
3246 * But we only get here if the client just explicitly
3247 * destroyed the client. Surely it no longer cares what
3248 * error it gets back on an operation for the dead
3254 nfsd4_client_record_create(cstate
->session
->se_client
);
3260 nfsd4_setclientid(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
3261 union nfsd4_op_u
*u
)
3263 struct nfsd4_setclientid
*setclid
= &u
->setclientid
;
3264 struct xdr_netobj clname
= setclid
->se_name
;
3265 nfs4_verifier clverifier
= setclid
->se_verf
;
3266 struct nfs4_client
*conf
, *new;
3267 struct nfs4_client
*unconf
= NULL
;
3269 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
3271 new = create_client(clname
, rqstp
, &clverifier
);
3273 return nfserr_jukebox
;
3274 /* Cases below refer to rfc 3530 section 14.2.33: */
3275 spin_lock(&nn
->client_lock
);
3276 conf
= find_confirmed_client_by_name(&clname
, nn
);
3277 if (conf
&& client_has_state(conf
)) {
3279 status
= nfserr_clid_inuse
;
3280 if (clp_used_exchangeid(conf
))
3282 if (!same_creds(&conf
->cl_cred
, &rqstp
->rq_cred
)) {
3283 char addr_str
[INET6_ADDRSTRLEN
];
3284 rpc_ntop((struct sockaddr
*) &conf
->cl_addr
, addr_str
,
3286 dprintk("NFSD: setclientid: string in use by client "
3287 "at %s\n", addr_str
);
3291 unconf
= find_unconfirmed_client_by_name(&clname
, nn
);
3293 unhash_client_locked(unconf
);
3294 if (conf
&& same_verf(&conf
->cl_verifier
, &clverifier
)) {
3295 /* case 1: probable callback update */
3296 copy_clid(new, conf
);
3297 gen_confirm(new, nn
);
3298 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3300 new->cl_minorversion
= 0;
3301 gen_callback(new, setclid
, rqstp
);
3302 add_to_unconfirmed(new);
3303 setclid
->se_clientid
.cl_boot
= new->cl_clientid
.cl_boot
;
3304 setclid
->se_clientid
.cl_id
= new->cl_clientid
.cl_id
;
3305 memcpy(setclid
->se_confirm
.data
, new->cl_confirm
.data
, sizeof(setclid
->se_confirm
.data
));
3309 spin_unlock(&nn
->client_lock
);
3313 expire_client(unconf
);
3319 nfsd4_setclientid_confirm(struct svc_rqst
*rqstp
,
3320 struct nfsd4_compound_state
*cstate
,
3321 union nfsd4_op_u
*u
)
3323 struct nfsd4_setclientid_confirm
*setclientid_confirm
=
3324 &u
->setclientid_confirm
;
3325 struct nfs4_client
*conf
, *unconf
;
3326 struct nfs4_client
*old
= NULL
;
3327 nfs4_verifier confirm
= setclientid_confirm
->sc_confirm
;
3328 clientid_t
* clid
= &setclientid_confirm
->sc_clientid
;
3330 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
3332 if (STALE_CLIENTID(clid
, nn
))
3333 return nfserr_stale_clientid
;
3335 spin_lock(&nn
->client_lock
);
3336 conf
= find_confirmed_client(clid
, false, nn
);
3337 unconf
= find_unconfirmed_client(clid
, false, nn
);
3339 * We try hard to give out unique clientid's, so if we get an
3340 * attempt to confirm the same clientid with a different cred,
3341 * the client may be buggy; this should never happen.
3343 * Nevertheless, RFC 7530 recommends INUSE for this case:
3345 status
= nfserr_clid_inuse
;
3346 if (unconf
&& !same_creds(&unconf
->cl_cred
, &rqstp
->rq_cred
))
3348 if (conf
&& !same_creds(&conf
->cl_cred
, &rqstp
->rq_cred
))
3350 /* cases below refer to rfc 3530 section 14.2.34: */
3351 if (!unconf
|| !same_verf(&confirm
, &unconf
->cl_confirm
)) {
3352 if (conf
&& same_verf(&confirm
, &conf
->cl_confirm
)) {
3353 /* case 2: probable retransmit */
3355 } else /* case 4: client hasn't noticed we rebooted yet? */
3356 status
= nfserr_stale_clientid
;
3360 if (conf
) { /* case 1: callback update */
3362 unhash_client_locked(old
);
3363 nfsd4_change_callback(conf
, &unconf
->cl_cb_conn
);
3364 } else { /* case 3: normal case; new or rebooted client */
3365 old
= find_confirmed_client_by_name(&unconf
->cl_name
, nn
);
3367 status
= nfserr_clid_inuse
;
3368 if (client_has_state(old
)
3369 && !same_creds(&unconf
->cl_cred
,
3372 status
= mark_client_expired_locked(old
);
3378 move_to_confirmed(unconf
);
3381 get_client_locked(conf
);
3382 spin_unlock(&nn
->client_lock
);
3383 nfsd4_probe_callback(conf
);
3384 spin_lock(&nn
->client_lock
);
3385 put_client_renew_locked(conf
);
3387 spin_unlock(&nn
->client_lock
);
3393 static struct nfs4_file
*nfsd4_alloc_file(void)
3395 return kmem_cache_alloc(file_slab
, GFP_KERNEL
);
3398 /* OPEN Share state helper functions */
3399 static void nfsd4_init_file(struct knfsd_fh
*fh
, unsigned int hashval
,
3400 struct nfs4_file
*fp
)
3402 lockdep_assert_held(&state_lock
);
3404 refcount_set(&fp
->fi_ref
, 1);
3405 spin_lock_init(&fp
->fi_lock
);
3406 INIT_LIST_HEAD(&fp
->fi_stateids
);
3407 INIT_LIST_HEAD(&fp
->fi_delegations
);
3408 INIT_LIST_HEAD(&fp
->fi_clnt_odstate
);
3409 fh_copy_shallow(&fp
->fi_fhandle
, fh
);
3410 fp
->fi_deleg_file
= NULL
;
3411 fp
->fi_had_conflict
= false;
3412 fp
->fi_share_deny
= 0;
3413 memset(fp
->fi_fds
, 0, sizeof(fp
->fi_fds
));
3414 memset(fp
->fi_access
, 0, sizeof(fp
->fi_access
));
3415 #ifdef CONFIG_NFSD_PNFS
3416 INIT_LIST_HEAD(&fp
->fi_lo_states
);
3417 atomic_set(&fp
->fi_lo_recalls
, 0);
3419 hlist_add_head_rcu(&fp
->fi_hash
, &file_hashtbl
[hashval
]);
3423 nfsd4_free_slabs(void)
3425 kmem_cache_destroy(odstate_slab
);
3426 kmem_cache_destroy(openowner_slab
);
3427 kmem_cache_destroy(lockowner_slab
);
3428 kmem_cache_destroy(file_slab
);
3429 kmem_cache_destroy(stateid_slab
);
3430 kmem_cache_destroy(deleg_slab
);
3434 nfsd4_init_slabs(void)
3436 openowner_slab
= kmem_cache_create("nfsd4_openowners",
3437 sizeof(struct nfs4_openowner
), 0, 0, NULL
);
3438 if (openowner_slab
== NULL
)
3440 lockowner_slab
= kmem_cache_create("nfsd4_lockowners",
3441 sizeof(struct nfs4_lockowner
), 0, 0, NULL
);
3442 if (lockowner_slab
== NULL
)
3443 goto out_free_openowner_slab
;
3444 file_slab
= kmem_cache_create("nfsd4_files",
3445 sizeof(struct nfs4_file
), 0, 0, NULL
);
3446 if (file_slab
== NULL
)
3447 goto out_free_lockowner_slab
;
3448 stateid_slab
= kmem_cache_create("nfsd4_stateids",
3449 sizeof(struct nfs4_ol_stateid
), 0, 0, NULL
);
3450 if (stateid_slab
== NULL
)
3451 goto out_free_file_slab
;
3452 deleg_slab
= kmem_cache_create("nfsd4_delegations",
3453 sizeof(struct nfs4_delegation
), 0, 0, NULL
);
3454 if (deleg_slab
== NULL
)
3455 goto out_free_stateid_slab
;
3456 odstate_slab
= kmem_cache_create("nfsd4_odstate",
3457 sizeof(struct nfs4_clnt_odstate
), 0, 0, NULL
);
3458 if (odstate_slab
== NULL
)
3459 goto out_free_deleg_slab
;
3462 out_free_deleg_slab
:
3463 kmem_cache_destroy(deleg_slab
);
3464 out_free_stateid_slab
:
3465 kmem_cache_destroy(stateid_slab
);
3467 kmem_cache_destroy(file_slab
);
3468 out_free_lockowner_slab
:
3469 kmem_cache_destroy(lockowner_slab
);
3470 out_free_openowner_slab
:
3471 kmem_cache_destroy(openowner_slab
);
3473 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3477 static void init_nfs4_replay(struct nfs4_replay
*rp
)
3479 rp
->rp_status
= nfserr_serverfault
;
3481 rp
->rp_buf
= rp
->rp_ibuf
;
3482 mutex_init(&rp
->rp_mutex
);
3485 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state
*cstate
,
3486 struct nfs4_stateowner
*so
)
3488 if (!nfsd4_has_session(cstate
)) {
3489 mutex_lock(&so
->so_replay
.rp_mutex
);
3490 cstate
->replay_owner
= nfs4_get_stateowner(so
);
3494 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state
*cstate
)
3496 struct nfs4_stateowner
*so
= cstate
->replay_owner
;
3499 cstate
->replay_owner
= NULL
;
3500 mutex_unlock(&so
->so_replay
.rp_mutex
);
3501 nfs4_put_stateowner(so
);
3505 static inline void *alloc_stateowner(struct kmem_cache
*slab
, struct xdr_netobj
*owner
, struct nfs4_client
*clp
)
3507 struct nfs4_stateowner
*sop
;
3509 sop
= kmem_cache_alloc(slab
, GFP_KERNEL
);
3513 sop
->so_owner
.data
= kmemdup(owner
->data
, owner
->len
, GFP_KERNEL
);
3514 if (!sop
->so_owner
.data
) {
3515 kmem_cache_free(slab
, sop
);
3518 sop
->so_owner
.len
= owner
->len
;
3520 INIT_LIST_HEAD(&sop
->so_stateids
);
3521 sop
->so_client
= clp
;
3522 init_nfs4_replay(&sop
->so_replay
);
3523 atomic_set(&sop
->so_count
, 1);
3527 static void hash_openowner(struct nfs4_openowner
*oo
, struct nfs4_client
*clp
, unsigned int strhashval
)
3529 lockdep_assert_held(&clp
->cl_lock
);
3531 list_add(&oo
->oo_owner
.so_strhash
,
3532 &clp
->cl_ownerstr_hashtbl
[strhashval
]);
3533 list_add(&oo
->oo_perclient
, &clp
->cl_openowners
);
3536 static void nfs4_unhash_openowner(struct nfs4_stateowner
*so
)
3538 unhash_openowner_locked(openowner(so
));
3541 static void nfs4_free_openowner(struct nfs4_stateowner
*so
)
3543 struct nfs4_openowner
*oo
= openowner(so
);
3545 kmem_cache_free(openowner_slab
, oo
);
3548 static const struct nfs4_stateowner_operations openowner_ops
= {
3549 .so_unhash
= nfs4_unhash_openowner
,
3550 .so_free
= nfs4_free_openowner
,
3553 static struct nfs4_ol_stateid
*
3554 nfsd4_find_existing_open(struct nfs4_file
*fp
, struct nfsd4_open
*open
)
3556 struct nfs4_ol_stateid
*local
, *ret
= NULL
;
3557 struct nfs4_openowner
*oo
= open
->op_openowner
;
3559 lockdep_assert_held(&fp
->fi_lock
);
3561 list_for_each_entry(local
, &fp
->fi_stateids
, st_perfile
) {
3562 /* ignore lock owners */
3563 if (local
->st_stateowner
->so_is_open_owner
== 0)
3565 if (local
->st_stateowner
== &oo
->oo_owner
) {
3567 refcount_inc(&ret
->st_stid
.sc_count
);
3574 static struct nfs4_openowner
*
3575 alloc_init_open_stateowner(unsigned int strhashval
, struct nfsd4_open
*open
,
3576 struct nfsd4_compound_state
*cstate
)
3578 struct nfs4_client
*clp
= cstate
->clp
;
3579 struct nfs4_openowner
*oo
, *ret
;
3581 oo
= alloc_stateowner(openowner_slab
, &open
->op_owner
, clp
);
3584 oo
->oo_owner
.so_ops
= &openowner_ops
;
3585 oo
->oo_owner
.so_is_open_owner
= 1;
3586 oo
->oo_owner
.so_seqid
= open
->op_seqid
;
3588 if (nfsd4_has_session(cstate
))
3589 oo
->oo_flags
|= NFS4_OO_CONFIRMED
;
3591 oo
->oo_last_closed_stid
= NULL
;
3592 INIT_LIST_HEAD(&oo
->oo_close_lru
);
3593 spin_lock(&clp
->cl_lock
);
3594 ret
= find_openstateowner_str_locked(strhashval
, open
, clp
);
3596 hash_openowner(oo
, clp
, strhashval
);
3599 nfs4_free_stateowner(&oo
->oo_owner
);
3601 spin_unlock(&clp
->cl_lock
);
3605 static struct nfs4_ol_stateid
*
3606 init_open_stateid(struct nfs4_file
*fp
, struct nfsd4_open
*open
)
3609 struct nfs4_openowner
*oo
= open
->op_openowner
;
3610 struct nfs4_ol_stateid
*retstp
= NULL
;
3611 struct nfs4_ol_stateid
*stp
;
3614 /* We are moving these outside of the spinlocks to avoid the warnings */
3615 mutex_init(&stp
->st_mutex
);
3616 mutex_lock(&stp
->st_mutex
);
3618 spin_lock(&oo
->oo_owner
.so_client
->cl_lock
);
3619 spin_lock(&fp
->fi_lock
);
3621 retstp
= nfsd4_find_existing_open(fp
, open
);
3625 open
->op_stp
= NULL
;
3626 refcount_inc(&stp
->st_stid
.sc_count
);
3627 stp
->st_stid
.sc_type
= NFS4_OPEN_STID
;
3628 INIT_LIST_HEAD(&stp
->st_locks
);
3629 stp
->st_stateowner
= nfs4_get_stateowner(&oo
->oo_owner
);
3631 stp
->st_stid
.sc_file
= fp
;
3632 stp
->st_access_bmap
= 0;
3633 stp
->st_deny_bmap
= 0;
3634 stp
->st_openstp
= NULL
;
3635 list_add(&stp
->st_perstateowner
, &oo
->oo_owner
.so_stateids
);
3636 list_add(&stp
->st_perfile
, &fp
->fi_stateids
);
3639 spin_unlock(&fp
->fi_lock
);
3640 spin_unlock(&oo
->oo_owner
.so_client
->cl_lock
);
3642 mutex_lock(&retstp
->st_mutex
);
3643 /* To keep mutex tracking happy */
3644 mutex_unlock(&stp
->st_mutex
);
3651 * In the 4.0 case we need to keep the owners around a little while to handle
3652 * CLOSE replay. We still do need to release any file access that is held by
3653 * them before returning however.
3656 move_to_close_lru(struct nfs4_ol_stateid
*s
, struct net
*net
)
3658 struct nfs4_ol_stateid
*last
;
3659 struct nfs4_openowner
*oo
= openowner(s
->st_stateowner
);
3660 struct nfsd_net
*nn
= net_generic(s
->st_stid
.sc_client
->net
,
3663 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo
);
3666 * We know that we hold one reference via nfsd4_close, and another
3667 * "persistent" reference for the client. If the refcount is higher
3668 * than 2, then there are still calls in progress that are using this
3669 * stateid. We can't put the sc_file reference until they are finished.
3670 * Wait for the refcount to drop to 2. Since it has been unhashed,
3671 * there should be no danger of the refcount going back up again at
3674 wait_event(close_wq
, refcount_read(&s
->st_stid
.sc_count
) == 2);
3676 release_all_access(s
);
3677 if (s
->st_stid
.sc_file
) {
3678 put_nfs4_file(s
->st_stid
.sc_file
);
3679 s
->st_stid
.sc_file
= NULL
;
3682 spin_lock(&nn
->client_lock
);
3683 last
= oo
->oo_last_closed_stid
;
3684 oo
->oo_last_closed_stid
= s
;
3685 list_move_tail(&oo
->oo_close_lru
, &nn
->close_lru
);
3686 oo
->oo_time
= get_seconds();
3687 spin_unlock(&nn
->client_lock
);
3689 nfs4_put_stid(&last
->st_stid
);
3692 /* search file_hashtbl[] for file */
3693 static struct nfs4_file
*
3694 find_file_locked(struct knfsd_fh
*fh
, unsigned int hashval
)
3696 struct nfs4_file
*fp
;
3698 hlist_for_each_entry_rcu(fp
, &file_hashtbl
[hashval
], fi_hash
) {
3699 if (fh_match(&fp
->fi_fhandle
, fh
)) {
3700 if (refcount_inc_not_zero(&fp
->fi_ref
))
3708 find_file(struct knfsd_fh
*fh
)
3710 struct nfs4_file
*fp
;
3711 unsigned int hashval
= file_hashval(fh
);
3714 fp
= find_file_locked(fh
, hashval
);
3719 static struct nfs4_file
*
3720 find_or_add_file(struct nfs4_file
*new, struct knfsd_fh
*fh
)
3722 struct nfs4_file
*fp
;
3723 unsigned int hashval
= file_hashval(fh
);
3726 fp
= find_file_locked(fh
, hashval
);
3731 spin_lock(&state_lock
);
3732 fp
= find_file_locked(fh
, hashval
);
3733 if (likely(fp
== NULL
)) {
3734 nfsd4_init_file(fh
, hashval
, new);
3737 spin_unlock(&state_lock
);
3743 * Called to check deny when READ with all zero stateid or
3744 * WRITE with all zero or all one stateid
3747 nfs4_share_conflict(struct svc_fh
*current_fh
, unsigned int deny_type
)
3749 struct nfs4_file
*fp
;
3750 __be32 ret
= nfs_ok
;
3752 fp
= find_file(¤t_fh
->fh_handle
);
3755 /* Check for conflicting share reservations */
3756 spin_lock(&fp
->fi_lock
);
3757 if (fp
->fi_share_deny
& deny_type
)
3758 ret
= nfserr_locked
;
3759 spin_unlock(&fp
->fi_lock
);
3764 static void nfsd4_cb_recall_prepare(struct nfsd4_callback
*cb
)
3766 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3767 struct nfsd_net
*nn
= net_generic(dp
->dl_stid
.sc_client
->net
,
3770 block_delegations(&dp
->dl_stid
.sc_file
->fi_fhandle
);
3773 * We can't do this in nfsd_break_deleg_cb because it is
3774 * already holding inode->i_lock.
3776 * If the dl_time != 0, then we know that it has already been
3777 * queued for a lease break. Don't queue it again.
3779 spin_lock(&state_lock
);
3780 if (dp
->dl_time
== 0) {
3781 dp
->dl_time
= get_seconds();
3782 list_add_tail(&dp
->dl_recall_lru
, &nn
->del_recall_lru
);
3784 spin_unlock(&state_lock
);
3787 static int nfsd4_cb_recall_done(struct nfsd4_callback
*cb
,
3788 struct rpc_task
*task
)
3790 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3792 if (dp
->dl_stid
.sc_type
== NFS4_CLOSED_DELEG_STID
)
3795 switch (task
->tk_status
) {
3799 case -NFS4ERR_BAD_STATEID
:
3801 * Race: client probably got cb_recall before open reply
3802 * granting delegation.
3804 if (dp
->dl_retries
--) {
3805 rpc_delay(task
, 2 * HZ
);
3814 static void nfsd4_cb_recall_release(struct nfsd4_callback
*cb
)
3816 struct nfs4_delegation
*dp
= cb_to_delegation(cb
);
3818 nfs4_put_stid(&dp
->dl_stid
);
3821 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops
= {
3822 .prepare
= nfsd4_cb_recall_prepare
,
3823 .done
= nfsd4_cb_recall_done
,
3824 .release
= nfsd4_cb_recall_release
,
3827 static void nfsd_break_one_deleg(struct nfs4_delegation
*dp
)
3830 * We're assuming the state code never drops its reference
3831 * without first removing the lease. Since we're in this lease
3832 * callback (and since the lease code is serialized by the kernel
3833 * lock) we know the server hasn't removed the lease yet, we know
3834 * it's safe to take a reference.
3836 refcount_inc(&dp
->dl_stid
.sc_count
);
3837 nfsd4_run_cb(&dp
->dl_recall
);
3840 /* Called from break_lease() with i_lock held. */
3842 nfsd_break_deleg_cb(struct file_lock
*fl
)
3845 struct nfs4_file
*fp
= (struct nfs4_file
*)fl
->fl_owner
;
3846 struct nfs4_delegation
*dp
;
3849 WARN(1, "(%p)->fl_owner NULL\n", fl
);
3852 if (fp
->fi_had_conflict
) {
3853 WARN(1, "duplicate break on %p\n", fp
);
3857 * We don't want the locks code to timeout the lease for us;
3858 * we'll remove it ourself if a delegation isn't returned
3861 fl
->fl_break_time
= 0;
3863 spin_lock(&fp
->fi_lock
);
3864 fp
->fi_had_conflict
= true;
3866 * If there are no delegations on the list, then return true
3867 * so that the lease code will go ahead and delete it.
3869 if (list_empty(&fp
->fi_delegations
))
3872 list_for_each_entry(dp
, &fp
->fi_delegations
, dl_perfile
)
3873 nfsd_break_one_deleg(dp
);
3874 spin_unlock(&fp
->fi_lock
);
3879 nfsd_change_deleg_cb(struct file_lock
*onlist
, int arg
,
3880 struct list_head
*dispose
)
3883 return lease_modify(onlist
, arg
, dispose
);
3888 static const struct lock_manager_operations nfsd_lease_mng_ops
= {
3889 .lm_break
= nfsd_break_deleg_cb
,
3890 .lm_change
= nfsd_change_deleg_cb
,
3893 static __be32
nfsd4_check_seqid(struct nfsd4_compound_state
*cstate
, struct nfs4_stateowner
*so
, u32 seqid
)
3895 if (nfsd4_has_session(cstate
))
3897 if (seqid
== so
->so_seqid
- 1)
3898 return nfserr_replay_me
;
3899 if (seqid
== so
->so_seqid
)
3901 return nfserr_bad_seqid
;
3904 static __be32
lookup_clientid(clientid_t
*clid
,
3905 struct nfsd4_compound_state
*cstate
,
3906 struct nfsd_net
*nn
)
3908 struct nfs4_client
*found
;
3911 found
= cstate
->clp
;
3912 if (!same_clid(&found
->cl_clientid
, clid
))
3913 return nfserr_stale_clientid
;
3917 if (STALE_CLIENTID(clid
, nn
))
3918 return nfserr_stale_clientid
;
3921 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3922 * cached already then we know this is for is for v4.0 and "sessions"
3925 WARN_ON_ONCE(cstate
->session
);
3926 spin_lock(&nn
->client_lock
);
3927 found
= find_confirmed_client(clid
, false, nn
);
3929 spin_unlock(&nn
->client_lock
);
3930 return nfserr_expired
;
3932 atomic_inc(&found
->cl_refcount
);
3933 spin_unlock(&nn
->client_lock
);
3935 /* Cache the nfs4_client in cstate! */
3936 cstate
->clp
= found
;
3941 nfsd4_process_open1(struct nfsd4_compound_state
*cstate
,
3942 struct nfsd4_open
*open
, struct nfsd_net
*nn
)
3944 clientid_t
*clientid
= &open
->op_clientid
;
3945 struct nfs4_client
*clp
= NULL
;
3946 unsigned int strhashval
;
3947 struct nfs4_openowner
*oo
= NULL
;
3950 if (STALE_CLIENTID(&open
->op_clientid
, nn
))
3951 return nfserr_stale_clientid
;
3953 * In case we need it later, after we've already created the
3954 * file and don't want to risk a further failure:
3956 open
->op_file
= nfsd4_alloc_file();
3957 if (open
->op_file
== NULL
)
3958 return nfserr_jukebox
;
3960 status
= lookup_clientid(clientid
, cstate
, nn
);
3965 strhashval
= ownerstr_hashval(&open
->op_owner
);
3966 oo
= find_openstateowner_str(strhashval
, open
, clp
);
3967 open
->op_openowner
= oo
;
3971 if (!(oo
->oo_flags
& NFS4_OO_CONFIRMED
)) {
3972 /* Replace unconfirmed owners without checking for replay. */
3973 release_openowner(oo
);
3974 open
->op_openowner
= NULL
;
3977 status
= nfsd4_check_seqid(cstate
, &oo
->oo_owner
, open
->op_seqid
);
3982 oo
= alloc_init_open_stateowner(strhashval
, open
, cstate
);
3984 return nfserr_jukebox
;
3985 open
->op_openowner
= oo
;
3987 open
->op_stp
= nfs4_alloc_open_stateid(clp
);
3989 return nfserr_jukebox
;
3991 if (nfsd4_has_session(cstate
) &&
3992 (cstate
->current_fh
.fh_export
->ex_flags
& NFSEXP_PNFS
)) {
3993 open
->op_odstate
= alloc_clnt_odstate(clp
);
3994 if (!open
->op_odstate
)
3995 return nfserr_jukebox
;
4001 static inline __be32
4002 nfs4_check_delegmode(struct nfs4_delegation
*dp
, int flags
)
4004 if ((flags
& WR_STATE
) && (dp
->dl_type
== NFS4_OPEN_DELEGATE_READ
))
4005 return nfserr_openmode
;
4010 static int share_access_to_flags(u32 share_access
)
4012 return share_access
== NFS4_SHARE_ACCESS_READ
? RD_STATE
: WR_STATE
;
4015 static struct nfs4_delegation
*find_deleg_stateid(struct nfs4_client
*cl
, stateid_t
*s
)
4017 struct nfs4_stid
*ret
;
4019 ret
= find_stateid_by_type(cl
, s
,
4020 NFS4_DELEG_STID
|NFS4_REVOKED_DELEG_STID
);
4023 return delegstateid(ret
);
4026 static bool nfsd4_is_deleg_cur(struct nfsd4_open
*open
)
4028 return open
->op_claim_type
== NFS4_OPEN_CLAIM_DELEGATE_CUR
||
4029 open
->op_claim_type
== NFS4_OPEN_CLAIM_DELEG_CUR_FH
;
4033 nfs4_check_deleg(struct nfs4_client
*cl
, struct nfsd4_open
*open
,
4034 struct nfs4_delegation
**dp
)
4037 __be32 status
= nfserr_bad_stateid
;
4038 struct nfs4_delegation
*deleg
;
4040 deleg
= find_deleg_stateid(cl
, &open
->op_delegate_stateid
);
4043 if (deleg
->dl_stid
.sc_type
== NFS4_REVOKED_DELEG_STID
) {
4044 nfs4_put_stid(&deleg
->dl_stid
);
4045 if (cl
->cl_minorversion
)
4046 status
= nfserr_deleg_revoked
;
4049 flags
= share_access_to_flags(open
->op_share_access
);
4050 status
= nfs4_check_delegmode(deleg
, flags
);
4052 nfs4_put_stid(&deleg
->dl_stid
);
4057 if (!nfsd4_is_deleg_cur(open
))
4061 open
->op_openowner
->oo_flags
|= NFS4_OO_CONFIRMED
;
4065 static inline int nfs4_access_to_access(u32 nfs4_access
)
4069 if (nfs4_access
& NFS4_SHARE_ACCESS_READ
)
4070 flags
|= NFSD_MAY_READ
;
4071 if (nfs4_access
& NFS4_SHARE_ACCESS_WRITE
)
4072 flags
|= NFSD_MAY_WRITE
;
4076 static inline __be32
4077 nfsd4_truncate(struct svc_rqst
*rqstp
, struct svc_fh
*fh
,
4078 struct nfsd4_open
*open
)
4080 struct iattr iattr
= {
4081 .ia_valid
= ATTR_SIZE
,
4084 if (!open
->op_truncate
)
4086 if (!(open
->op_share_access
& NFS4_SHARE_ACCESS_WRITE
))
4087 return nfserr_inval
;
4088 return nfsd_setattr(rqstp
, fh
, &iattr
, 0, (time_t)0);
4091 static __be32
nfs4_get_vfs_file(struct svc_rqst
*rqstp
, struct nfs4_file
*fp
,
4092 struct svc_fh
*cur_fh
, struct nfs4_ol_stateid
*stp
,
4093 struct nfsd4_open
*open
)
4095 struct file
*filp
= NULL
;
4097 int oflag
= nfs4_access_to_omode(open
->op_share_access
);
4098 int access
= nfs4_access_to_access(open
->op_share_access
);
4099 unsigned char old_access_bmap
, old_deny_bmap
;
4101 spin_lock(&fp
->fi_lock
);
4104 * Are we trying to set a deny mode that would conflict with
4107 status
= nfs4_file_check_deny(fp
, open
->op_share_deny
);
4108 if (status
!= nfs_ok
) {
4109 spin_unlock(&fp
->fi_lock
);
4113 /* set access to the file */
4114 status
= nfs4_file_get_access(fp
, open
->op_share_access
);
4115 if (status
!= nfs_ok
) {
4116 spin_unlock(&fp
->fi_lock
);
4120 /* Set access bits in stateid */
4121 old_access_bmap
= stp
->st_access_bmap
;
4122 set_access(open
->op_share_access
, stp
);
4124 /* Set new deny mask */
4125 old_deny_bmap
= stp
->st_deny_bmap
;
4126 set_deny(open
->op_share_deny
, stp
);
4127 fp
->fi_share_deny
|= (open
->op_share_deny
& NFS4_SHARE_DENY_BOTH
);
4129 if (!fp
->fi_fds
[oflag
]) {
4130 spin_unlock(&fp
->fi_lock
);
4131 status
= nfsd_open(rqstp
, cur_fh
, S_IFREG
, access
, &filp
);
4133 goto out_put_access
;
4134 spin_lock(&fp
->fi_lock
);
4135 if (!fp
->fi_fds
[oflag
]) {
4136 fp
->fi_fds
[oflag
] = filp
;
4140 spin_unlock(&fp
->fi_lock
);
4144 status
= nfsd4_truncate(rqstp
, cur_fh
, open
);
4146 goto out_put_access
;
4150 stp
->st_access_bmap
= old_access_bmap
;
4151 nfs4_file_put_access(fp
, open
->op_share_access
);
4152 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap
), stp
);
4157 nfs4_upgrade_open(struct svc_rqst
*rqstp
, struct nfs4_file
*fp
, struct svc_fh
*cur_fh
, struct nfs4_ol_stateid
*stp
, struct nfsd4_open
*open
)
4160 unsigned char old_deny_bmap
= stp
->st_deny_bmap
;
4162 if (!test_access(open
->op_share_access
, stp
))
4163 return nfs4_get_vfs_file(rqstp
, fp
, cur_fh
, stp
, open
);
4165 /* test and set deny mode */
4166 spin_lock(&fp
->fi_lock
);
4167 status
= nfs4_file_check_deny(fp
, open
->op_share_deny
);
4168 if (status
== nfs_ok
) {
4169 set_deny(open
->op_share_deny
, stp
);
4170 fp
->fi_share_deny
|=
4171 (open
->op_share_deny
& NFS4_SHARE_DENY_BOTH
);
4173 spin_unlock(&fp
->fi_lock
);
4175 if (status
!= nfs_ok
)
4178 status
= nfsd4_truncate(rqstp
, cur_fh
, open
);
4179 if (status
!= nfs_ok
)
4180 reset_union_bmap_deny(old_deny_bmap
, stp
);
4184 /* Should we give out recallable state?: */
4185 static bool nfsd4_cb_channel_good(struct nfs4_client
*clp
)
4187 if (clp
->cl_cb_state
== NFSD4_CB_UP
)
4190 * In the sessions case, since we don't have to establish a
4191 * separate connection for callbacks, we assume it's OK
4192 * until we hear otherwise:
4194 return clp
->cl_minorversion
&& clp
->cl_cb_state
== NFSD4_CB_UNKNOWN
;
4197 static struct file_lock
*nfs4_alloc_init_lease(struct nfs4_file
*fp
, int flag
)
4199 struct file_lock
*fl
;
4201 fl
= locks_alloc_lock();
4204 fl
->fl_lmops
= &nfsd_lease_mng_ops
;
4205 fl
->fl_flags
= FL_DELEG
;
4206 fl
->fl_type
= flag
== NFS4_OPEN_DELEGATE_READ
? F_RDLCK
: F_WRLCK
;
4207 fl
->fl_end
= OFFSET_MAX
;
4208 fl
->fl_owner
= (fl_owner_t
)fp
;
4209 fl
->fl_pid
= current
->tgid
;
4214 * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4215 * @dp: a pointer to the nfs4_delegation we're adding.
4218 * On success: Return code will be 0 on success.
4220 * On error: -EAGAIN if there was an existing delegation.
4221 * nonzero if there is an error in other cases.
4225 static int nfs4_setlease(struct nfs4_delegation
*dp
)
4227 struct nfs4_file
*fp
= dp
->dl_stid
.sc_file
;
4228 struct file_lock
*fl
;
4232 fl
= nfs4_alloc_init_lease(fp
, NFS4_OPEN_DELEGATE_READ
);
4235 filp
= find_readable_file(fp
);
4237 /* We should always have a readable file here */
4239 locks_free_lock(fl
);
4243 status
= vfs_setlease(filp
, fl
->fl_type
, &fl
, NULL
);
4245 locks_free_lock(fl
);
4248 spin_lock(&state_lock
);
4249 spin_lock(&fp
->fi_lock
);
4250 /* Did the lease get broken before we took the lock? */
4252 if (fp
->fi_had_conflict
)
4255 if (fp
->fi_deleg_file
) {
4256 status
= hash_delegation_locked(dp
, fp
);
4259 fp
->fi_deleg_file
= filp
;
4260 fp
->fi_delegees
= 0;
4261 status
= hash_delegation_locked(dp
, fp
);
4262 spin_unlock(&fp
->fi_lock
);
4263 spin_unlock(&state_lock
);
4265 /* Should never happen, this is a new fi_deleg_file */
4271 spin_unlock(&fp
->fi_lock
);
4272 spin_unlock(&state_lock
);
4278 static struct nfs4_delegation
*
4279 nfs4_set_delegation(struct nfs4_client
*clp
, struct svc_fh
*fh
,
4280 struct nfs4_file
*fp
, struct nfs4_clnt_odstate
*odstate
)
4283 struct nfs4_delegation
*dp
;
4285 if (fp
->fi_had_conflict
)
4286 return ERR_PTR(-EAGAIN
);
4288 spin_lock(&state_lock
);
4289 spin_lock(&fp
->fi_lock
);
4290 status
= nfs4_get_existing_delegation(clp
, fp
);
4291 spin_unlock(&fp
->fi_lock
);
4292 spin_unlock(&state_lock
);
4295 return ERR_PTR(status
);
4297 dp
= alloc_init_deleg(clp
, fh
, odstate
);
4299 return ERR_PTR(-ENOMEM
);
4302 spin_lock(&state_lock
);
4303 spin_lock(&fp
->fi_lock
);
4304 dp
->dl_stid
.sc_file
= fp
;
4305 if (!fp
->fi_deleg_file
) {
4306 spin_unlock(&fp
->fi_lock
);
4307 spin_unlock(&state_lock
);
4308 status
= nfs4_setlease(dp
);
4311 if (fp
->fi_had_conflict
) {
4315 status
= hash_delegation_locked(dp
, fp
);
4317 spin_unlock(&fp
->fi_lock
);
4318 spin_unlock(&state_lock
);
4321 put_clnt_odstate(dp
->dl_clnt_odstate
);
4322 nfs4_put_stid(&dp
->dl_stid
);
4323 return ERR_PTR(status
);
4328 static void nfsd4_open_deleg_none_ext(struct nfsd4_open
*open
, int status
)
4330 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE_EXT
;
4331 if (status
== -EAGAIN
)
4332 open
->op_why_no_deleg
= WND4_CONTENTION
;
4334 open
->op_why_no_deleg
= WND4_RESOURCE
;
4335 switch (open
->op_deleg_want
) {
4336 case NFS4_SHARE_WANT_READ_DELEG
:
4337 case NFS4_SHARE_WANT_WRITE_DELEG
:
4338 case NFS4_SHARE_WANT_ANY_DELEG
:
4340 case NFS4_SHARE_WANT_CANCEL
:
4341 open
->op_why_no_deleg
= WND4_CANCELLED
;
4343 case NFS4_SHARE_WANT_NO_DELEG
:
4350 * Attempt to hand out a delegation.
4352 * Note we don't support write delegations, and won't until the vfs has
4353 * proper support for them.
4356 nfs4_open_delegation(struct svc_fh
*fh
, struct nfsd4_open
*open
,
4357 struct nfs4_ol_stateid
*stp
)
4359 struct nfs4_delegation
*dp
;
4360 struct nfs4_openowner
*oo
= openowner(stp
->st_stateowner
);
4361 struct nfs4_client
*clp
= stp
->st_stid
.sc_client
;
4365 cb_up
= nfsd4_cb_channel_good(oo
->oo_owner
.so_client
);
4366 open
->op_recall
= 0;
4367 switch (open
->op_claim_type
) {
4368 case NFS4_OPEN_CLAIM_PREVIOUS
:
4370 open
->op_recall
= 1;
4371 if (open
->op_delegate_type
!= NFS4_OPEN_DELEGATE_READ
)
4374 case NFS4_OPEN_CLAIM_NULL
:
4375 case NFS4_OPEN_CLAIM_FH
:
4377 * Let's not give out any delegations till everyone's
4378 * had the chance to reclaim theirs, *and* until
4379 * NLM locks have all been reclaimed:
4381 if (locks_in_grace(clp
->net
))
4383 if (!cb_up
|| !(oo
->oo_flags
& NFS4_OO_CONFIRMED
))
4386 * Also, if the file was opened for write or
4387 * create, there's a good chance the client's
4388 * about to write to it, resulting in an
4389 * immediate recall (since we don't support
4390 * write delegations):
4392 if (open
->op_share_access
& NFS4_SHARE_ACCESS_WRITE
)
4394 if (open
->op_create
== NFS4_OPEN_CREATE
)
4400 dp
= nfs4_set_delegation(clp
, fh
, stp
->st_stid
.sc_file
, stp
->st_clnt_odstate
);
4404 memcpy(&open
->op_delegate_stateid
, &dp
->dl_stid
.sc_stateid
, sizeof(dp
->dl_stid
.sc_stateid
));
4406 dprintk("NFSD: delegation stateid=" STATEID_FMT
"\n",
4407 STATEID_VAL(&dp
->dl_stid
.sc_stateid
));
4408 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_READ
;
4409 nfs4_put_stid(&dp
->dl_stid
);
4412 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE
;
4413 if (open
->op_claim_type
== NFS4_OPEN_CLAIM_PREVIOUS
&&
4414 open
->op_delegate_type
!= NFS4_OPEN_DELEGATE_NONE
) {
4415 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4416 open
->op_recall
= 1;
4419 /* 4.1 client asking for a delegation? */
4420 if (open
->op_deleg_want
)
4421 nfsd4_open_deleg_none_ext(open
, status
);
4425 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open
*open
,
4426 struct nfs4_delegation
*dp
)
4428 if (open
->op_deleg_want
== NFS4_SHARE_WANT_READ_DELEG
&&
4429 dp
->dl_type
== NFS4_OPEN_DELEGATE_WRITE
) {
4430 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE_EXT
;
4431 open
->op_why_no_deleg
= WND4_NOT_SUPP_DOWNGRADE
;
4432 } else if (open
->op_deleg_want
== NFS4_SHARE_WANT_WRITE_DELEG
&&
4433 dp
->dl_type
== NFS4_OPEN_DELEGATE_WRITE
) {
4434 open
->op_delegate_type
= NFS4_OPEN_DELEGATE_NONE_EXT
;
4435 open
->op_why_no_deleg
= WND4_NOT_SUPP_UPGRADE
;
4437 /* Otherwise the client must be confused wanting a delegation
4438 * it already has, therefore we don't return
4439 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		mutex_lock(&stp->st_mutex);
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		/* stp is returned locked. */
		stp = init_open_stateid(fp, open);
		/* See if we lost the race to some other thread */
		if (stp->st_access_bmap != 0) {
			status = nfs4_upgrade_open(rqstp, fp, current_fh,
						stp, open);
			if (status) {
				mutex_unlock(&stp->st_mutex);
				goto out;
			}
			goto upgrade_out;
		}
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			release_open_stateid(stp);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}
upgrade_out:
	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
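/*
 * For reference: NFS4_OPEN_RESULT_LOCKTYPE_POSIX advertises POSIX byte-range
 * locking semantics, NFS4_OPEN_RESULT_CONFIRM tells an NFSv4.0 client it must
 * send OPEN_CONFIRM before using the stateid, and
 * NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK is only meaningful on sessions, where the
 * server may send CB_NOTIFY_LOCK when a blocked lock becomes available.
 */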
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	clientid_t *clid = &u->renew;
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}
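/*
 * RENEW is an NFSv4.0-only operation: NFSv4.1+ clients renew their lease
 * implicitly with every SEQUENCE, so they never get here.
 */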
void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim.  But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (time_after((unsigned long)oo->oo_time,
			       (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	/*
	 * It's possible for a client to try and acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (time_after((unsigned long)nbl->nbl_time,
			       (unsigned long)cutoff)) {
			t = nbl->nbl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}

	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
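/*
 * The value handed back above is the delay, in seconds, before the next
 * laundromat run: the shortest time until some still-live client, delegation,
 * openowner or blocked lock would expire, capped at one lease period and
 * floored at NFSD_LAUNDROMAT_MINTIMEOUT.
 */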
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32
nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}
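/*
 * Note that access_permit_read() above also succeeds for a WRITE-only open;
 * clients may issue READs against a write-only stateid (for example when
 * doing a read-modify-write of a partial page), so refusing them here would
 * likely do more harm than good.
 */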
static __be32
nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
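/*
 * Example: if our copy of the stateid carries si_generation 3, an incoming
 * stateid with generation 3 (or generation 0 on a v4.1+ session) is accepted,
 * generation 4 is "from the future" and gets nfserr_bad_stateid, and
 * generation 2 gets nfserr_old_stateid, which may simply mean the client's
 * RPCs were reordered in flight.
 */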
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
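/*
 * nfsd4_validate_stateid() backs the TEST_STATEID operation further below:
 * it only reports a per-stateid status and has no side effects on the state
 * it examines.
 */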
4913 nfsd4_lookup_stateid(struct nfsd4_compound_state
*cstate
,
4914 stateid_t
*stateid
, unsigned char typemask
,
4915 struct nfs4_stid
**s
, struct nfsd_net
*nn
)
4918 bool return_revoked
= false;
4921 * only return revoked delegations if explicitly asked.
4922 * otherwise we report revoked or bad_stateid status.
4924 if (typemask
& NFS4_REVOKED_DELEG_STID
)
4925 return_revoked
= true;
4926 else if (typemask
& NFS4_DELEG_STID
)
4927 typemask
|= NFS4_REVOKED_DELEG_STID
;
4929 if (ZERO_STATEID(stateid
) || ONE_STATEID(stateid
))
4930 return nfserr_bad_stateid
;
4931 status
= lookup_clientid(&stateid
->si_opaque
.so_clid
, cstate
, nn
);
4932 if (status
== nfserr_stale_clientid
) {
4933 if (cstate
->session
)
4934 return nfserr_bad_stateid
;
4935 return nfserr_stale_stateid
;
4939 *s
= find_stateid_by_type(cstate
->clp
, stateid
, typemask
);
4941 return nfserr_bad_stateid
;
4942 if (((*s
)->sc_type
== NFS4_REVOKED_DELEG_STID
) && !return_revoked
) {
4944 if (cstate
->minorversion
)
4945 return nfserr_deleg_revoked
;
4946 return nfserr_bad_stateid
;
4951 static struct file
*
4952 nfs4_find_file(struct nfs4_stid
*s
, int flags
)
4957 switch (s
->sc_type
) {
4958 case NFS4_DELEG_STID
:
4959 if (WARN_ON_ONCE(!s
->sc_file
->fi_deleg_file
))
4961 return get_file(s
->sc_file
->fi_deleg_file
);
4962 case NFS4_OPEN_STID
:
4963 case NFS4_LOCK_STID
:
4964 if (flags
& RD_STATE
)
4965 return find_readable_file(s
->sc_file
);
4967 return find_writeable_file(s
->sc_file
);
4975 nfs4_check_olstateid(struct svc_fh
*fhp
, struct nfs4_ol_stateid
*ols
, int flags
)
4979 status
= nfsd4_check_openowner_confirmed(ols
);
4982 return nfs4_check_openmode(ols
, flags
);
4986 nfs4_check_file(struct svc_rqst
*rqstp
, struct svc_fh
*fhp
, struct nfs4_stid
*s
,
4987 struct file
**filpp
, bool *tmp_file
, int flags
)
4989 int acc
= (flags
& RD_STATE
) ? NFSD_MAY_READ
: NFSD_MAY_WRITE
;
4993 file
= nfs4_find_file(s
, flags
);
4995 status
= nfsd_permission(rqstp
, fhp
->fh_export
, fhp
->fh_dentry
,
4996 acc
| NFSD_MAY_OWNER_OVERRIDE
);
5004 status
= nfsd_open(rqstp
, fhp
, S_IFREG
, acc
, filpp
);
5016 * Checks for stateid operations
5019 nfs4_preprocess_stateid_op(struct svc_rqst
*rqstp
,
5020 struct nfsd4_compound_state
*cstate
, struct svc_fh
*fhp
,
5021 stateid_t
*stateid
, int flags
, struct file
**filpp
, bool *tmp_file
)
5023 struct inode
*ino
= d_inode(fhp
->fh_dentry
);
5024 struct net
*net
= SVC_NET(rqstp
);
5025 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
5026 struct nfs4_stid
*s
= NULL
;
5034 if (grace_disallows_io(net
, ino
))
5035 return nfserr_grace
;
5037 if (ZERO_STATEID(stateid
) || ONE_STATEID(stateid
)) {
5038 status
= check_special_stateids(net
, fhp
, stateid
, flags
);
5042 status
= nfsd4_lookup_stateid(cstate
, stateid
,
5043 NFS4_DELEG_STID
|NFS4_OPEN_STID
|NFS4_LOCK_STID
,
5047 status
= check_stateid_generation(stateid
, &s
->sc_stateid
,
5048 nfsd4_has_session(cstate
));
5052 switch (s
->sc_type
) {
5053 case NFS4_DELEG_STID
:
5054 status
= nfs4_check_delegmode(delegstateid(s
), flags
);
5056 case NFS4_OPEN_STID
:
5057 case NFS4_LOCK_STID
:
5058 status
= nfs4_check_olstateid(fhp
, openlockstateid(s
), flags
);
5061 status
= nfserr_bad_stateid
;
5066 status
= nfs4_check_fh(fhp
, s
);
5069 if (!status
&& filpp
)
5070 status
= nfs4_check_file(rqstp
, fhp
, s
, filpp
, tmp_file
, flags
);
5078 * Test if the stateid is valid
5081 nfsd4_test_stateid(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5082 union nfsd4_op_u
*u
)
5084 struct nfsd4_test_stateid
*test_stateid
= &u
->test_stateid
;
5085 struct nfsd4_test_stateid_id
*stateid
;
5086 struct nfs4_client
*cl
= cstate
->session
->se_client
;
5088 list_for_each_entry(stateid
, &test_stateid
->ts_stateid_list
, ts_id_list
)
5089 stateid
->ts_id_status
=
5090 nfsd4_validate_stateid(cl
, &stateid
->ts_id_stateid
);
5096 nfsd4_free_lock_stateid(stateid_t
*stateid
, struct nfs4_stid
*s
)
5098 struct nfs4_ol_stateid
*stp
= openlockstateid(s
);
5101 mutex_lock(&stp
->st_mutex
);
5103 ret
= check_stateid_generation(stateid
, &s
->sc_stateid
, 1);
5107 ret
= nfserr_locks_held
;
5108 if (check_for_locks(stp
->st_stid
.sc_file
,
5109 lockowner(stp
->st_stateowner
)))
5112 release_lock_stateid(stp
);
5116 mutex_unlock(&stp
->st_mutex
);
5122 nfsd4_free_stateid(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5123 union nfsd4_op_u
*u
)
5125 struct nfsd4_free_stateid
*free_stateid
= &u
->free_stateid
;
5126 stateid_t
*stateid
= &free_stateid
->fr_stateid
;
5127 struct nfs4_stid
*s
;
5128 struct nfs4_delegation
*dp
;
5129 struct nfs4_client
*cl
= cstate
->session
->se_client
;
5130 __be32 ret
= nfserr_bad_stateid
;
5132 spin_lock(&cl
->cl_lock
);
5133 s
= find_stateid_locked(cl
, stateid
);
5136 switch (s
->sc_type
) {
5137 case NFS4_DELEG_STID
:
5138 ret
= nfserr_locks_held
;
5140 case NFS4_OPEN_STID
:
5141 ret
= check_stateid_generation(stateid
, &s
->sc_stateid
, 1);
5144 ret
= nfserr_locks_held
;
5146 case NFS4_LOCK_STID
:
5147 refcount_inc(&s
->sc_count
);
5148 spin_unlock(&cl
->cl_lock
);
5149 ret
= nfsd4_free_lock_stateid(stateid
, s
);
5151 case NFS4_REVOKED_DELEG_STID
:
5152 dp
= delegstateid(s
);
5153 list_del_init(&dp
->dl_recall_lru
);
5154 spin_unlock(&cl
->cl_lock
);
5158 /* Default falls through and returns nfserr_bad_stateid */
5161 spin_unlock(&cl
->cl_lock
);
5169 return (type
== NFS4_READW_LT
|| type
== NFS4_READ_LT
) ?
5170 RD_STATE
: WR_STATE
;
5173 static __be32
nfs4_seqid_op_checks(struct nfsd4_compound_state
*cstate
, stateid_t
*stateid
, u32 seqid
, struct nfs4_ol_stateid
*stp
)
5175 struct svc_fh
*current_fh
= &cstate
->current_fh
;
5176 struct nfs4_stateowner
*sop
= stp
->st_stateowner
;
5179 status
= nfsd4_check_seqid(cstate
, sop
, seqid
);
5182 if (stp
->st_stid
.sc_type
== NFS4_CLOSED_STID
5183 || stp
->st_stid
.sc_type
== NFS4_REVOKED_DELEG_STID
)
5185 * "Closed" stateid's exist *only* to return
5186 * nfserr_replay_me from the previous step, and
5187 * revoked delegations are kept only for free_stateid.
5189 return nfserr_bad_stateid
;
5190 mutex_lock(&stp
->st_mutex
);
5191 status
= check_stateid_generation(stateid
, &stp
->st_stid
.sc_stateid
, nfsd4_has_session(cstate
));
5192 if (status
== nfs_ok
)
5193 status
= nfs4_check_fh(current_fh
, &stp
->st_stid
);
5194 if (status
!= nfs_ok
)
5195 mutex_unlock(&stp
->st_mutex
);
5200 * Checks for sequence id mutating operations.
5203 nfs4_preprocess_seqid_op(struct nfsd4_compound_state
*cstate
, u32 seqid
,
5204 stateid_t
*stateid
, char typemask
,
5205 struct nfs4_ol_stateid
**stpp
,
5206 struct nfsd_net
*nn
)
5209 struct nfs4_stid
*s
;
5210 struct nfs4_ol_stateid
*stp
= NULL
;
5212 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT
"\n", __func__
,
5213 seqid
, STATEID_VAL(stateid
));
5216 status
= nfsd4_lookup_stateid(cstate
, stateid
, typemask
, &s
, nn
);
5219 stp
= openlockstateid(s
);
5220 nfsd4_cstate_assign_replay(cstate
, stp
->st_stateowner
);
5222 status
= nfs4_seqid_op_checks(cstate
, stateid
, seqid
, stp
);
5226 nfs4_put_stid(&stp
->st_stid
);
5230 static __be32
nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state
*cstate
, u32 seqid
,
5231 stateid_t
*stateid
, struct nfs4_ol_stateid
**stpp
, struct nfsd_net
*nn
)
5234 struct nfs4_openowner
*oo
;
5235 struct nfs4_ol_stateid
*stp
;
5237 status
= nfs4_preprocess_seqid_op(cstate
, seqid
, stateid
,
5238 NFS4_OPEN_STID
, &stp
, nn
);
5241 oo
= openowner(stp
->st_stateowner
);
5242 if (!(oo
->oo_flags
& NFS4_OO_CONFIRMED
)) {
5243 mutex_unlock(&stp
->st_mutex
);
5244 nfs4_put_stid(&stp
->st_stid
);
5245 return nfserr_bad_stateid
;
5252 nfsd4_open_confirm(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5253 union nfsd4_op_u
*u
)
5255 struct nfsd4_open_confirm
*oc
= &u
->open_confirm
;
5257 struct nfs4_openowner
*oo
;
5258 struct nfs4_ol_stateid
*stp
;
5259 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5261 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5262 cstate
->current_fh
.fh_dentry
);
5264 status
= fh_verify(rqstp
, &cstate
->current_fh
, S_IFREG
, 0);
5268 status
= nfs4_preprocess_seqid_op(cstate
,
5269 oc
->oc_seqid
, &oc
->oc_req_stateid
,
5270 NFS4_OPEN_STID
, &stp
, nn
);
5273 oo
= openowner(stp
->st_stateowner
);
5274 status
= nfserr_bad_stateid
;
5275 if (oo
->oo_flags
& NFS4_OO_CONFIRMED
) {
5276 mutex_unlock(&stp
->st_mutex
);
5279 oo
->oo_flags
|= NFS4_OO_CONFIRMED
;
5280 nfs4_inc_and_copy_stateid(&oc
->oc_resp_stateid
, &stp
->st_stid
);
5281 mutex_unlock(&stp
->st_mutex
);
5282 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT
"\n",
5283 __func__
, oc
->oc_seqid
, STATEID_VAL(&stp
->st_stid
.sc_stateid
));
5285 nfsd4_client_record_create(oo
->oo_owner
.so_client
);
5288 nfs4_put_stid(&stp
->st_stid
);
5290 nfsd4_bump_seqid(cstate
, status
);
5294 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid
*stp
, u32 access
)
5296 if (!test_access(access
, stp
))
5298 nfs4_file_put_access(stp
->st_stid
.sc_file
, access
);
5299 clear_access(access
, stp
);
5302 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid
*stp
, u32 to_access
)
5304 switch (to_access
) {
5305 case NFS4_SHARE_ACCESS_READ
:
5306 nfs4_stateid_downgrade_bit(stp
, NFS4_SHARE_ACCESS_WRITE
);
5307 nfs4_stateid_downgrade_bit(stp
, NFS4_SHARE_ACCESS_BOTH
);
5309 case NFS4_SHARE_ACCESS_WRITE
:
5310 nfs4_stateid_downgrade_bit(stp
, NFS4_SHARE_ACCESS_READ
);
5311 nfs4_stateid_downgrade_bit(stp
, NFS4_SHARE_ACCESS_BOTH
);
5313 case NFS4_SHARE_ACCESS_BOTH
:
5321 nfsd4_open_downgrade(struct svc_rqst
*rqstp
,
5322 struct nfsd4_compound_state
*cstate
, union nfsd4_op_u
*u
)
5324 struct nfsd4_open_downgrade
*od
= &u
->open_downgrade
;
5326 struct nfs4_ol_stateid
*stp
;
5327 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5329 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5330 cstate
->current_fh
.fh_dentry
);
5332 /* We don't yet support WANT bits: */
5333 if (od
->od_deleg_want
)
5334 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__
,
5337 status
= nfs4_preprocess_confirmed_seqid_op(cstate
, od
->od_seqid
,
5338 &od
->od_stateid
, &stp
, nn
);
5341 status
= nfserr_inval
;
5342 if (!test_access(od
->od_share_access
, stp
)) {
5343 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5344 stp
->st_access_bmap
, od
->od_share_access
);
5347 if (!test_deny(od
->od_share_deny
, stp
)) {
5348 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5349 stp
->st_deny_bmap
, od
->od_share_deny
);
5352 nfs4_stateid_downgrade(stp
, od
->od_share_access
);
5353 reset_union_bmap_deny(od
->od_share_deny
, stp
);
5354 nfs4_inc_and_copy_stateid(&od
->od_stateid
, &stp
->st_stid
);
5357 mutex_unlock(&stp
->st_mutex
);
5358 nfs4_put_stid(&stp
->st_stid
);
5360 nfsd4_bump_seqid(cstate
, status
);
5364 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid
*s
)
5366 struct nfs4_client
*clp
= s
->st_stid
.sc_client
;
5368 LIST_HEAD(reaplist
);
5370 s
->st_stid
.sc_type
= NFS4_CLOSED_STID
;
5371 spin_lock(&clp
->cl_lock
);
5372 unhashed
= unhash_open_stateid(s
, &reaplist
);
5374 if (clp
->cl_minorversion
) {
5376 put_ol_stateid_locked(s
, &reaplist
);
5377 spin_unlock(&clp
->cl_lock
);
5378 free_ol_stateid_reaplist(&reaplist
);
5380 spin_unlock(&clp
->cl_lock
);
5381 free_ol_stateid_reaplist(&reaplist
);
5383 move_to_close_lru(s
, clp
->net
);
5388 * nfs4_unlock_state() called after encode
5391 nfsd4_close(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5392 union nfsd4_op_u
*u
)
5394 struct nfsd4_close
*close
= &u
->close
;
5396 struct nfs4_ol_stateid
*stp
;
5397 struct net
*net
= SVC_NET(rqstp
);
5398 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
5400 dprintk("NFSD: nfsd4_close on file %pd\n",
5401 cstate
->current_fh
.fh_dentry
);
5403 status
= nfs4_preprocess_seqid_op(cstate
, close
->cl_seqid
,
5405 NFS4_OPEN_STID
|NFS4_CLOSED_STID
,
5407 nfsd4_bump_seqid(cstate
, status
);
5410 nfs4_inc_and_copy_stateid(&close
->cl_stateid
, &stp
->st_stid
);
5411 mutex_unlock(&stp
->st_mutex
);
5413 nfsd4_close_open_stateid(stp
);
5415 /* put reference from nfs4_preprocess_seqid_op */
5416 nfs4_put_stid(&stp
->st_stid
);
5422 nfsd4_delegreturn(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5423 union nfsd4_op_u
*u
)
5425 struct nfsd4_delegreturn
*dr
= &u
->delegreturn
;
5426 struct nfs4_delegation
*dp
;
5427 stateid_t
*stateid
= &dr
->dr_stateid
;
5428 struct nfs4_stid
*s
;
5430 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
5432 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
, S_IFREG
, 0)))
5435 status
= nfsd4_lookup_stateid(cstate
, stateid
, NFS4_DELEG_STID
, &s
, nn
);
5438 dp
= delegstateid(s
);
5439 status
= check_stateid_generation(stateid
, &dp
->dl_stid
.sc_stateid
, nfsd4_has_session(cstate
));
5443 destroy_delegation(dp
);
5445 nfs4_put_stid(&dp
->dl_stid
);
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
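/*
 * Example: a lock request starting at offset 2^63 becomes a negative fl_start
 * once stored in the signed loff_t, so the transform above clamps it to
 * OFFSET_MAX; the resulting lock covers less than the client asked for, which
 * is exactly the compliance gap described in the TODO above.
 */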
5488 nfsd4_fl_get_owner(fl_owner_t owner
)
5490 struct nfs4_lockowner
*lo
= (struct nfs4_lockowner
*)owner
;
5492 nfs4_get_stateowner(&lo
->lo_owner
);
5497 nfsd4_fl_put_owner(fl_owner_t owner
)
5499 struct nfs4_lockowner
*lo
= (struct nfs4_lockowner
*)owner
;
5502 nfs4_put_stateowner(&lo
->lo_owner
);
5506 nfsd4_lm_notify(struct file_lock
*fl
)
5508 struct nfs4_lockowner
*lo
= (struct nfs4_lockowner
*)fl
->fl_owner
;
5509 struct net
*net
= lo
->lo_owner
.so_client
->net
;
5510 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
5511 struct nfsd4_blocked_lock
*nbl
= container_of(fl
,
5512 struct nfsd4_blocked_lock
, nbl_lock
);
5515 /* An empty list means that something else is going to be using it */
5516 spin_lock(&nn
->blocked_locks_lock
);
5517 if (!list_empty(&nbl
->nbl_list
)) {
5518 list_del_init(&nbl
->nbl_list
);
5519 list_del_init(&nbl
->nbl_lru
);
5522 spin_unlock(&nn
->blocked_locks_lock
);
5525 nfsd4_run_cb(&nbl
->nbl_cb
);
5528 static const struct lock_manager_operations nfsd_posix_mng_ops
= {
5529 .lm_notify
= nfsd4_lm_notify
,
5530 .lm_get_owner
= nfsd4_fl_get_owner
,
5531 .lm_put_owner
= nfsd4_fl_put_owner
,
5535 nfs4_set_lock_denied(struct file_lock
*fl
, struct nfsd4_lock_denied
*deny
)
5537 struct nfs4_lockowner
*lo
;
5539 if (fl
->fl_lmops
== &nfsd_posix_mng_ops
) {
5540 lo
= (struct nfs4_lockowner
*) fl
->fl_owner
;
5541 deny
->ld_owner
.data
= kmemdup(lo
->lo_owner
.so_owner
.data
,
5542 lo
->lo_owner
.so_owner
.len
, GFP_KERNEL
);
5543 if (!deny
->ld_owner
.data
)
5544 /* We just don't care that much */
5546 deny
->ld_owner
.len
= lo
->lo_owner
.so_owner
.len
;
5547 deny
->ld_clientid
= lo
->lo_owner
.so_client
->cl_clientid
;
5550 deny
->ld_owner
.len
= 0;
5551 deny
->ld_owner
.data
= NULL
;
5552 deny
->ld_clientid
.cl_boot
= 0;
5553 deny
->ld_clientid
.cl_id
= 0;
5555 deny
->ld_start
= fl
->fl_start
;
5556 deny
->ld_length
= NFS4_MAX_UINT64
;
5557 if (fl
->fl_end
!= NFS4_MAX_UINT64
)
5558 deny
->ld_length
= fl
->fl_end
- fl
->fl_start
+ 1;
5559 deny
->ld_type
= NFS4_READ_LT
;
5560 if (fl
->fl_type
!= F_RDLCK
)
5561 deny
->ld_type
= NFS4_WRITE_LT
;
5564 static struct nfs4_lockowner
*
5565 find_lockowner_str_locked(struct nfs4_client
*clp
, struct xdr_netobj
*owner
)
5567 unsigned int strhashval
= ownerstr_hashval(owner
);
5568 struct nfs4_stateowner
*so
;
5570 lockdep_assert_held(&clp
->cl_lock
);
5572 list_for_each_entry(so
, &clp
->cl_ownerstr_hashtbl
[strhashval
],
5574 if (so
->so_is_open_owner
)
5576 if (same_owner_str(so
, owner
))
5577 return lockowner(nfs4_get_stateowner(so
));
5582 static struct nfs4_lockowner
*
5583 find_lockowner_str(struct nfs4_client
*clp
, struct xdr_netobj
*owner
)
5585 struct nfs4_lockowner
*lo
;
5587 spin_lock(&clp
->cl_lock
);
5588 lo
= find_lockowner_str_locked(clp
, owner
);
5589 spin_unlock(&clp
->cl_lock
);
5593 static void nfs4_unhash_lockowner(struct nfs4_stateowner
*sop
)
5595 unhash_lockowner_locked(lockowner(sop
));
5598 static void nfs4_free_lockowner(struct nfs4_stateowner
*sop
)
5600 struct nfs4_lockowner
*lo
= lockowner(sop
);
5602 kmem_cache_free(lockowner_slab
, lo
);
5605 static const struct nfs4_stateowner_operations lockowner_ops
= {
5606 .so_unhash
= nfs4_unhash_lockowner
,
5607 .so_free
= nfs4_free_lockowner
,
5611 * Alloc a lock owner structure.
5612 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
5615 * strhashval = ownerstr_hashval
5617 static struct nfs4_lockowner
*
5618 alloc_init_lock_stateowner(unsigned int strhashval
, struct nfs4_client
*clp
,
5619 struct nfs4_ol_stateid
*open_stp
,
5620 struct nfsd4_lock
*lock
)
5622 struct nfs4_lockowner
*lo
, *ret
;
5624 lo
= alloc_stateowner(lockowner_slab
, &lock
->lk_new_owner
, clp
);
5627 INIT_LIST_HEAD(&lo
->lo_blocked
);
5628 INIT_LIST_HEAD(&lo
->lo_owner
.so_stateids
);
5629 lo
->lo_owner
.so_is_open_owner
= 0;
5630 lo
->lo_owner
.so_seqid
= lock
->lk_new_lock_seqid
;
5631 lo
->lo_owner
.so_ops
= &lockowner_ops
;
5632 spin_lock(&clp
->cl_lock
);
5633 ret
= find_lockowner_str_locked(clp
, &lock
->lk_new_owner
);
5635 list_add(&lo
->lo_owner
.so_strhash
,
5636 &clp
->cl_ownerstr_hashtbl
[strhashval
]);
5639 nfs4_free_stateowner(&lo
->lo_owner
);
5641 spin_unlock(&clp
->cl_lock
);
5646 init_lock_stateid(struct nfs4_ol_stateid
*stp
, struct nfs4_lockowner
*lo
,
5647 struct nfs4_file
*fp
, struct inode
*inode
,
5648 struct nfs4_ol_stateid
*open_stp
)
5650 struct nfs4_client
*clp
= lo
->lo_owner
.so_client
;
5652 lockdep_assert_held(&clp
->cl_lock
);
5654 refcount_inc(&stp
->st_stid
.sc_count
);
5655 stp
->st_stid
.sc_type
= NFS4_LOCK_STID
;
5656 stp
->st_stateowner
= nfs4_get_stateowner(&lo
->lo_owner
);
5658 stp
->st_stid
.sc_file
= fp
;
5659 stp
->st_access_bmap
= 0;
5660 stp
->st_deny_bmap
= open_stp
->st_deny_bmap
;
5661 stp
->st_openstp
= open_stp
;
5662 mutex_init(&stp
->st_mutex
);
5663 list_add(&stp
->st_locks
, &open_stp
->st_locks
);
5664 list_add(&stp
->st_perstateowner
, &lo
->lo_owner
.so_stateids
);
5665 spin_lock(&fp
->fi_lock
);
5666 list_add(&stp
->st_perfile
, &fp
->fi_stateids
);
5667 spin_unlock(&fp
->fi_lock
);
5670 static struct nfs4_ol_stateid
*
5671 find_lock_stateid(struct nfs4_lockowner
*lo
, struct nfs4_file
*fp
)
5673 struct nfs4_ol_stateid
*lst
;
5674 struct nfs4_client
*clp
= lo
->lo_owner
.so_client
;
5676 lockdep_assert_held(&clp
->cl_lock
);
5678 list_for_each_entry(lst
, &lo
->lo_owner
.so_stateids
, st_perstateowner
) {
5679 if (lst
->st_stid
.sc_file
== fp
) {
5680 refcount_inc(&lst
->st_stid
.sc_count
);
5687 static struct nfs4_ol_stateid
*
5688 find_or_create_lock_stateid(struct nfs4_lockowner
*lo
, struct nfs4_file
*fi
,
5689 struct inode
*inode
, struct nfs4_ol_stateid
*ost
,
5692 struct nfs4_stid
*ns
= NULL
;
5693 struct nfs4_ol_stateid
*lst
;
5694 struct nfs4_openowner
*oo
= openowner(ost
->st_stateowner
);
5695 struct nfs4_client
*clp
= oo
->oo_owner
.so_client
;
5697 spin_lock(&clp
->cl_lock
);
5698 lst
= find_lock_stateid(lo
, fi
);
5700 spin_unlock(&clp
->cl_lock
);
5701 ns
= nfs4_alloc_stid(clp
, stateid_slab
, nfs4_free_lock_stateid
);
5705 spin_lock(&clp
->cl_lock
);
5706 lst
= find_lock_stateid(lo
, fi
);
5708 lst
= openlockstateid(ns
);
5709 init_lock_stateid(lst
, lo
, fi
, inode
, ost
);
5714 spin_unlock(&clp
->cl_lock
);
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
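/*
 * Example: a zero length is always invalid; offset 0xfffffffffffffff0 with
 * length 0x20 is invalid because the range would wrap past the last byte;
 * the special length NFS4_MAX_UINT64 ("to end of file") is always accepted.
 */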
5727 static void get_lock_access(struct nfs4_ol_stateid
*lock_stp
, u32 access
)
5729 struct nfs4_file
*fp
= lock_stp
->st_stid
.sc_file
;
5731 lockdep_assert_held(&fp
->fi_lock
);
5733 if (test_access(access
, lock_stp
))
5735 __nfs4_file_get_access(fp
, access
);
5736 set_access(access
, lock_stp
);
5740 lookup_or_create_lock_state(struct nfsd4_compound_state
*cstate
,
5741 struct nfs4_ol_stateid
*ost
,
5742 struct nfsd4_lock
*lock
,
5743 struct nfs4_ol_stateid
**plst
, bool *new)
5746 struct nfs4_file
*fi
= ost
->st_stid
.sc_file
;
5747 struct nfs4_openowner
*oo
= openowner(ost
->st_stateowner
);
5748 struct nfs4_client
*cl
= oo
->oo_owner
.so_client
;
5749 struct inode
*inode
= d_inode(cstate
->current_fh
.fh_dentry
);
5750 struct nfs4_lockowner
*lo
;
5751 struct nfs4_ol_stateid
*lst
;
5752 unsigned int strhashval
;
5755 lo
= find_lockowner_str(cl
, &lock
->lk_new_owner
);
5757 strhashval
= ownerstr_hashval(&lock
->lk_new_owner
);
5758 lo
= alloc_init_lock_stateowner(strhashval
, cl
, ost
, lock
);
5760 return nfserr_jukebox
;
5762 /* with an existing lockowner, seqids must be the same */
5763 status
= nfserr_bad_seqid
;
5764 if (!cstate
->minorversion
&&
5765 lock
->lk_new_lock_seqid
!= lo
->lo_owner
.so_seqid
)
5770 lst
= find_or_create_lock_stateid(lo
, fi
, inode
, ost
, new);
5772 status
= nfserr_jukebox
;
5776 mutex_lock(&lst
->st_mutex
);
5778 /* See if it's still hashed to avoid race with FREE_STATEID */
5779 spin_lock(&cl
->cl_lock
);
5780 hashed
= !list_empty(&lst
->st_perfile
);
5781 spin_unlock(&cl
->cl_lock
);
5784 mutex_unlock(&lst
->st_mutex
);
5785 nfs4_put_stid(&lst
->st_stid
);
5791 nfs4_put_stateowner(&lo
->lo_owner
);
5799 nfsd4_lock(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
5800 union nfsd4_op_u
*u
)
5802 struct nfsd4_lock
*lock
= &u
->lock
;
5803 struct nfs4_openowner
*open_sop
= NULL
;
5804 struct nfs4_lockowner
*lock_sop
= NULL
;
5805 struct nfs4_ol_stateid
*lock_stp
= NULL
;
5806 struct nfs4_ol_stateid
*open_stp
= NULL
;
5807 struct nfs4_file
*fp
;
5808 struct file
*filp
= NULL
;
5809 struct nfsd4_blocked_lock
*nbl
= NULL
;
5810 struct file_lock
*file_lock
= NULL
;
5811 struct file_lock
*conflock
= NULL
;
5816 unsigned char fl_type
;
5817 unsigned int fl_flags
= FL_POSIX
;
5818 struct net
*net
= SVC_NET(rqstp
);
5819 struct nfsd_net
*nn
= net_generic(net
, nfsd_net_id
);
5821 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5822 (long long) lock
->lk_offset
,
5823 (long long) lock
->lk_length
);
5825 if (check_lock_length(lock
->lk_offset
, lock
->lk_length
))
5826 return nfserr_inval
;
5828 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
,
5829 S_IFREG
, NFSD_MAY_LOCK
))) {
5830 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5834 if (lock
->lk_is_new
) {
5835 if (nfsd4_has_session(cstate
))
5836 /* See rfc 5661 18.10.3: given clientid is ignored: */
5837 memcpy(&lock
->lk_new_clientid
,
5838 &cstate
->session
->se_client
->cl_clientid
,
5839 sizeof(clientid_t
));
5841 status
= nfserr_stale_clientid
;
5842 if (STALE_CLIENTID(&lock
->lk_new_clientid
, nn
))
5845 /* validate and update open stateid and open seqid */
5846 status
= nfs4_preprocess_confirmed_seqid_op(cstate
,
5847 lock
->lk_new_open_seqid
,
5848 &lock
->lk_new_open_stateid
,
5852 mutex_unlock(&open_stp
->st_mutex
);
5853 open_sop
= openowner(open_stp
->st_stateowner
);
5854 status
= nfserr_bad_stateid
;
5855 if (!same_clid(&open_sop
->oo_owner
.so_client
->cl_clientid
,
5856 &lock
->lk_new_clientid
))
5858 status
= lookup_or_create_lock_state(cstate
, open_stp
, lock
,
5861 status
= nfs4_preprocess_seqid_op(cstate
,
5862 lock
->lk_old_lock_seqid
,
5863 &lock
->lk_old_lock_stateid
,
5864 NFS4_LOCK_STID
, &lock_stp
, nn
);
5868 lock_sop
= lockowner(lock_stp
->st_stateowner
);
5870 lkflg
= setlkflg(lock
->lk_type
);
5871 status
= nfs4_check_openmode(lock_stp
, lkflg
);
5875 status
= nfserr_grace
;
5876 if (locks_in_grace(net
) && !lock
->lk_reclaim
)
5878 status
= nfserr_no_grace
;
5879 if (!locks_in_grace(net
) && lock
->lk_reclaim
)
5882 fp
= lock_stp
->st_stid
.sc_file
;
5883 switch (lock
->lk_type
) {
5885 if (nfsd4_has_session(cstate
))
5886 fl_flags
|= FL_SLEEP
;
5889 spin_lock(&fp
->fi_lock
);
5890 filp
= find_readable_file_locked(fp
);
5892 get_lock_access(lock_stp
, NFS4_SHARE_ACCESS_READ
);
5893 spin_unlock(&fp
->fi_lock
);
5896 case NFS4_WRITEW_LT
:
5897 if (nfsd4_has_session(cstate
))
5898 fl_flags
|= FL_SLEEP
;
5901 spin_lock(&fp
->fi_lock
);
5902 filp
= find_writeable_file_locked(fp
);
5904 get_lock_access(lock_stp
, NFS4_SHARE_ACCESS_WRITE
);
5905 spin_unlock(&fp
->fi_lock
);
5909 status
= nfserr_inval
;
5914 status
= nfserr_openmode
;
5918 nbl
= find_or_allocate_block(lock_sop
, &fp
->fi_fhandle
, nn
);
5920 dprintk("NFSD: %s: unable to allocate block!\n", __func__
);
5921 status
= nfserr_jukebox
;
5925 file_lock
= &nbl
->nbl_lock
;
5926 file_lock
->fl_type
= fl_type
;
5927 file_lock
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(&lock_sop
->lo_owner
));
5928 file_lock
->fl_pid
= current
->tgid
;
5929 file_lock
->fl_file
= filp
;
5930 file_lock
->fl_flags
= fl_flags
;
5931 file_lock
->fl_lmops
= &nfsd_posix_mng_ops
;
5932 file_lock
->fl_start
= lock
->lk_offset
;
5933 file_lock
->fl_end
= last_byte_offset(lock
->lk_offset
, lock
->lk_length
);
5934 nfs4_transform_lock_offset(file_lock
);
5936 conflock
= locks_alloc_lock();
5938 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
5939 status
= nfserr_jukebox
;
5943 if (fl_flags
& FL_SLEEP
) {
5944 nbl
->nbl_time
= jiffies
;
5945 spin_lock(&nn
->blocked_locks_lock
);
5946 list_add_tail(&nbl
->nbl_list
, &lock_sop
->lo_blocked
);
5947 list_add_tail(&nbl
->nbl_lru
, &nn
->blocked_locks_lru
);
5948 spin_unlock(&nn
->blocked_locks_lock
);
5951 err
= vfs_lock_file(filp
, F_SETLK
, file_lock
, conflock
);
5953 case 0: /* success! */
5954 nfs4_inc_and_copy_stateid(&lock
->lk_resp_stateid
, &lock_stp
->st_stid
);
5957 case FILE_LOCK_DEFERRED
:
5960 case -EAGAIN
: /* conflock holds conflicting lock */
5961 status
= nfserr_denied
;
5962 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5963 nfs4_set_lock_denied(conflock
, &lock
->lk_denied
);
5966 status
= nfserr_deadlock
;
5969 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err
);
5970 status
= nfserrno(err
);
5975 /* dequeue it if we queued it before */
5976 if (fl_flags
& FL_SLEEP
) {
5977 spin_lock(&nn
->blocked_locks_lock
);
5978 list_del_init(&nbl
->nbl_list
);
5979 list_del_init(&nbl
->nbl_lru
);
5980 spin_unlock(&nn
->blocked_locks_lock
);
5982 free_blocked_lock(nbl
);
5987 /* Bump seqid manually if the 4.0 replay owner is openowner */
5988 if (cstate
->replay_owner
&&
5989 cstate
->replay_owner
!= &lock_sop
->lo_owner
&&
5990 seqid_mutating_err(ntohl(status
)))
5991 lock_sop
->lo_owner
.so_seqid
++;
5993 mutex_unlock(&lock_stp
->st_mutex
);
5996 * If this is a new, never-before-used stateid, and we are
5997 * returning an error, then just go ahead and release it.
6000 release_lock_stateid(lock_stp
);
6002 nfs4_put_stid(&lock_stp
->st_stid
);
6005 nfs4_put_stid(&open_stp
->st_stid
);
6006 nfsd4_bump_seqid(cstate
, status
);
6008 locks_free_lock(conflock
);
6013 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6014 * so we do a temporary open here just to get an open file to pass to
6015 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
6018 static __be32
nfsd_test_lock(struct svc_rqst
*rqstp
, struct svc_fh
*fhp
, struct file_lock
*lock
)
6021 __be32 err
= nfsd_open(rqstp
, fhp
, S_IFREG
, NFSD_MAY_READ
, &file
);
6023 err
= nfserrno(vfs_test_lock(file
, lock
));
6033 nfsd4_lockt(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
6034 union nfsd4_op_u
*u
)
6036 struct nfsd4_lockt
*lockt
= &u
->lockt
;
6037 struct file_lock
*file_lock
= NULL
;
6038 struct nfs4_lockowner
*lo
= NULL
;
6040 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
6042 if (locks_in_grace(SVC_NET(rqstp
)))
6043 return nfserr_grace
;
6045 if (check_lock_length(lockt
->lt_offset
, lockt
->lt_length
))
6046 return nfserr_inval
;
6048 if (!nfsd4_has_session(cstate
)) {
6049 status
= lookup_clientid(&lockt
->lt_clientid
, cstate
, nn
);
6054 if ((status
= fh_verify(rqstp
, &cstate
->current_fh
, S_IFREG
, 0)))
6057 file_lock
= locks_alloc_lock();
6059 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
6060 status
= nfserr_jukebox
;
6064 switch (lockt
->lt_type
) {
6067 file_lock
->fl_type
= F_RDLCK
;
6070 case NFS4_WRITEW_LT
:
6071 file_lock
->fl_type
= F_WRLCK
;
6074 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6075 status
= nfserr_inval
;
6079 lo
= find_lockowner_str(cstate
->clp
, &lockt
->lt_owner
);
6081 file_lock
->fl_owner
= (fl_owner_t
)lo
;
6082 file_lock
->fl_pid
= current
->tgid
;
6083 file_lock
->fl_flags
= FL_POSIX
;
6085 file_lock
->fl_start
= lockt
->lt_offset
;
6086 file_lock
->fl_end
= last_byte_offset(lockt
->lt_offset
, lockt
->lt_length
);
6088 nfs4_transform_lock_offset(file_lock
);
6090 status
= nfsd_test_lock(rqstp
, &cstate
->current_fh
, file_lock
);
6094 if (file_lock
->fl_type
!= F_UNLCK
) {
6095 status
= nfserr_denied
;
6096 nfs4_set_lock_denied(file_lock
, &lockt
->lt_denied
);
6100 nfs4_put_stateowner(&lo
->lo_owner
);
6102 locks_free_lock(file_lock
);
6107 nfsd4_locku(struct svc_rqst
*rqstp
, struct nfsd4_compound_state
*cstate
,
6108 union nfsd4_op_u
*u
)
6110 struct nfsd4_locku
*locku
= &u
->locku
;
6111 struct nfs4_ol_stateid
*stp
;
6112 struct file
*filp
= NULL
;
6113 struct file_lock
*file_lock
= NULL
;
6116 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
6118 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6119 (long long) locku
->lu_offset
,
6120 (long long) locku
->lu_length
);
6122 if (check_lock_length(locku
->lu_offset
, locku
->lu_length
))
6123 return nfserr_inval
;
6125 status
= nfs4_preprocess_seqid_op(cstate
, locku
->lu_seqid
,
6126 &locku
->lu_stateid
, NFS4_LOCK_STID
,
6130 filp
= find_any_file(stp
->st_stid
.sc_file
);
6132 status
= nfserr_lock_range
;
6135 file_lock
= locks_alloc_lock();
6137 dprintk("NFSD: %s: unable to allocate lock!\n", __func__
);
6138 status
= nfserr_jukebox
;
6142 file_lock
->fl_type
= F_UNLCK
;
6143 file_lock
->fl_owner
= (fl_owner_t
)lockowner(nfs4_get_stateowner(stp
->st_stateowner
));
6144 file_lock
->fl_pid
= current
->tgid
;
6145 file_lock
->fl_file
= filp
;
6146 file_lock
->fl_flags
= FL_POSIX
;
6147 file_lock
->fl_lmops
= &nfsd_posix_mng_ops
;
6148 file_lock
->fl_start
= locku
->lu_offset
;
6150 file_lock
->fl_end
= last_byte_offset(locku
->lu_offset
,
6152 nfs4_transform_lock_offset(file_lock
);
6154 err
= vfs_lock_file(filp
, F_SETLK
, file_lock
, NULL
);
6156 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6159 nfs4_inc_and_copy_stateid(&locku
->lu_stateid
, &stp
->st_stid
);
6163 mutex_unlock(&stp
->st_mutex
);
6164 nfs4_put_stid(&stp
->st_stid
);
6166 nfsd4_bump_seqid(cstate
, status
);
6168 locks_free_lock(file_lock
);
6172 status
= nfserrno(err
);
6178 * true: locks held by lockowner
6179 * false: no locks held by lockowner
6182 check_for_locks(struct nfs4_file
*fp
, struct nfs4_lockowner
*lowner
)
6184 struct file_lock
*fl
;
6186 struct file
*filp
= find_any_file(fp
);
6187 struct inode
*inode
;
6188 struct file_lock_context
*flctx
;
6191 /* Any valid lock stateid should have some sort of access */
6196 inode
= file_inode(filp
);
6197 flctx
= inode
->i_flctx
;
6199 if (flctx
&& !list_empty_careful(&flctx
->flc_posix
)) {
6200 spin_lock(&flctx
->flc_lock
);
6201 list_for_each_entry(fl
, &flctx
->flc_posix
, fl_list
) {
6202 if (fl
->fl_owner
== (fl_owner_t
)lowner
) {
6207 spin_unlock(&flctx
->flc_lock
);
6214 nfsd4_release_lockowner(struct svc_rqst
*rqstp
,
6215 struct nfsd4_compound_state
*cstate
,
6216 union nfsd4_op_u
*u
)
6218 struct nfsd4_release_lockowner
*rlockowner
= &u
->release_lockowner
;
6219 clientid_t
*clid
= &rlockowner
->rl_clientid
;
6220 struct nfs4_stateowner
*sop
;
6221 struct nfs4_lockowner
*lo
= NULL
;
6222 struct nfs4_ol_stateid
*stp
;
6223 struct xdr_netobj
*owner
= &rlockowner
->rl_owner
;
6224 unsigned int hashval
= ownerstr_hashval(owner
);
6226 struct nfsd_net
*nn
= net_generic(SVC_NET(rqstp
), nfsd_net_id
);
6227 struct nfs4_client
*clp
;
6228 LIST_HEAD (reaplist
);
6230 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6231 clid
->cl_boot
, clid
->cl_id
);
6233 status
= lookup_clientid(clid
, cstate
, nn
);
6238 /* Find the matching lock stateowner */
6239 spin_lock(&clp
->cl_lock
);
6240 list_for_each_entry(sop
, &clp
->cl_ownerstr_hashtbl
[hashval
],
6243 if (sop
->so_is_open_owner
|| !same_owner_str(sop
, owner
))
6246 /* see if there are still any locks associated with it */
6247 lo
= lockowner(sop
);
6248 list_for_each_entry(stp
, &sop
->so_stateids
, st_perstateowner
) {
6249 if (check_for_locks(stp
->st_stid
.sc_file
, lo
)) {
6250 status
= nfserr_locks_held
;
6251 spin_unlock(&clp
->cl_lock
);
6256 nfs4_get_stateowner(sop
);
6260 spin_unlock(&clp
->cl_lock
);
6264 unhash_lockowner_locked(lo
);
6265 while (!list_empty(&lo
->lo_owner
.so_stateids
)) {
6266 stp
= list_first_entry(&lo
->lo_owner
.so_stateids
,
6267 struct nfs4_ol_stateid
,
6269 WARN_ON(!unhash_lock_stateid(stp
));
6270 put_ol_stateid_locked(stp
, &reaplist
);
6272 spin_unlock(&clp
->cl_lock
);
6273 free_ol_stateid_reaplist(&reaplist
);
6274 nfs4_put_stateowner(&lo
->lo_owner
);
6279 static inline struct nfs4_client_reclaim
*
6282 return kmalloc(sizeof(struct nfs4_client_reclaim
), GFP_KERNEL
);
6286 nfs4_has_reclaimed_state(const char *name
, struct nfsd_net
*nn
)
6288 struct nfs4_client_reclaim
*crp
;
6290 crp
= nfsd4_find_reclaim_client(name
, nn
);
6291 return (crp
&& crp
->cr_clp
);
6295 * failure => all reset bets are off, nfserr_no_grace...
6297 struct nfs4_client_reclaim
*
6298 nfs4_client_to_reclaim(const char *name
, struct nfsd_net
*nn
)
6300 unsigned int strhashval
;
6301 struct nfs4_client_reclaim
*crp
;
6303 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN
, name
);
6304 crp
= alloc_reclaim();
6306 strhashval
= clientstr_hashval(name
);
6307 INIT_LIST_HEAD(&crp
->cr_strhash
);
6308 list_add(&crp
->cr_strhash
, &nn
->reclaim_str_hashtbl
[strhashval
]);
6309 memcpy(crp
->cr_recdir
, name
, HEXDIR_LEN
);
6311 nn
->reclaim_str_hashtbl_size
++;
6317 nfs4_remove_reclaim_record(struct nfs4_client_reclaim
*crp
, struct nfsd_net
*nn
)
6319 list_del(&crp
->cr_strhash
);
6321 nn
->reclaim_str_hashtbl_size
--;
6325 nfs4_release_reclaim(struct nfsd_net
*nn
)
6327 struct nfs4_client_reclaim
*crp
= NULL
;
6330 for (i
= 0; i
< CLIENT_HASH_SIZE
; i
++) {
6331 while (!list_empty(&nn
->reclaim_str_hashtbl
[i
])) {
6332 crp
= list_entry(nn
->reclaim_str_hashtbl
[i
].next
,
6333 struct nfs4_client_reclaim
, cr_strhash
);
6334 nfs4_remove_reclaim_record(crp
, nn
);
6337 WARN_ON_ONCE(nn
->reclaim_str_hashtbl_size
);
6341 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6342 struct nfs4_client_reclaim
*
6343 nfsd4_find_reclaim_client(const char *recdir
, struct nfsd_net
*nn
)
6345 unsigned int strhashval
;
6346 struct nfs4_client_reclaim
*crp
= NULL
;
6348 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir
);
6350 strhashval
= clientstr_hashval(recdir
);
6351 list_for_each_entry(crp
, &nn
->reclaim_str_hashtbl
[strhashval
], cr_strhash
) {
6352 if (same_name(crp
->cr_recdir
, recdir
)) {
6360 * Called from OPEN. Look for clientid in reclaim list.
6363 nfs4_check_open_reclaim(clientid_t
*clid
,
6364 struct nfsd4_compound_state
*cstate
,
6365 struct nfsd_net
*nn
)
6369 /* find clientid in conf_id_hashtbl */
6370 status
= lookup_clientid(clid
, cstate
, nn
);
6372 return nfserr_reclaim_bad
;
6374 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE
, &cstate
->clp
->cl_flags
))
6375 return nfserr_no_grace
;
6377 if (nfsd4_client_record_check(cstate
->clp
))
6378 return nfserr_reclaim_bad
;
6383 #ifdef CONFIG_NFSD_FAULT_INJECTION
6385 put_client(struct nfs4_client
*clp
)
6387 atomic_dec(&clp
->cl_refcount
);
6390 static struct nfs4_client
*
6391 nfsd_find_client(struct sockaddr_storage
*addr
, size_t addr_size
)
6393 struct nfs4_client
*clp
;
6394 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6397 if (!nfsd_netns_ready(nn
))
6400 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
6401 if (memcmp(&clp
->cl_addr
, addr
, addr_size
) == 0)
6408 nfsd_inject_print_clients(void)
6410 struct nfs4_client
*clp
;
6412 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6414 char buf
[INET6_ADDRSTRLEN
];
6416 if (!nfsd_netns_ready(nn
))
6419 spin_lock(&nn
->client_lock
);
6420 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
6421 rpc_ntop((struct sockaddr
*)&clp
->cl_addr
, buf
, sizeof(buf
));
6422 pr_info("NFS Client: %s\n", buf
);
6425 spin_unlock(&nn
->client_lock
);
6431 nfsd_inject_forget_client(struct sockaddr_storage
*addr
, size_t addr_size
)
6434 struct nfs4_client
*clp
;
6435 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6438 if (!nfsd_netns_ready(nn
))
6441 spin_lock(&nn
->client_lock
);
6442 clp
= nfsd_find_client(addr
, addr_size
);
6444 if (mark_client_expired_locked(clp
) == nfs_ok
)
6449 spin_unlock(&nn
->client_lock
);
6458 nfsd_inject_forget_clients(u64 max
)
6461 struct nfs4_client
*clp
, *next
;
6462 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6464 LIST_HEAD(reaplist
);
6466 if (!nfsd_netns_ready(nn
))
6469 spin_lock(&nn
->client_lock
);
6470 list_for_each_entry_safe(clp
, next
, &nn
->client_lru
, cl_lru
) {
6471 if (mark_client_expired_locked(clp
) == nfs_ok
) {
6472 list_add(&clp
->cl_lru
, &reaplist
);
6473 if (max
!= 0 && ++count
>= max
)
6477 spin_unlock(&nn
->client_lock
);
6479 list_for_each_entry_safe(clp
, next
, &reaplist
, cl_lru
)
6485 static void nfsd_print_count(struct nfs4_client
*clp
, unsigned int count
,
6488 char buf
[INET6_ADDRSTRLEN
];
6489 rpc_ntop((struct sockaddr
*)&clp
->cl_addr
, buf
, sizeof(buf
));
6490 printk(KERN_INFO
"NFS Client: %s has %u %s\n", buf
, count
, type
);
6494 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid
*lst
,
6495 struct list_head
*collect
)
6497 struct nfs4_client
*clp
= lst
->st_stid
.sc_client
;
6498 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6504 lockdep_assert_held(&nn
->client_lock
);
6505 atomic_inc(&clp
->cl_refcount
);
6506 list_add(&lst
->st_locks
, collect
);
6509 static u64
nfsd_foreach_client_lock(struct nfs4_client
*clp
, u64 max
,
6510 struct list_head
*collect
,
6511 bool (*func
)(struct nfs4_ol_stateid
*))
6513 struct nfs4_openowner
*oop
;
6514 struct nfs4_ol_stateid
*stp
, *st_next
;
6515 struct nfs4_ol_stateid
*lst
, *lst_next
;
6518 spin_lock(&clp
->cl_lock
);
6519 list_for_each_entry(oop
, &clp
->cl_openowners
, oo_perclient
) {
6520 list_for_each_entry_safe(stp
, st_next
,
6521 &oop
->oo_owner
.so_stateids
, st_perstateowner
) {
6522 list_for_each_entry_safe(lst
, lst_next
,
6523 &stp
->st_locks
, st_locks
) {
6526 nfsd_inject_add_lock_to_list(lst
,
6531 * Despite the fact that these functions deal
6532 * with 64-bit integers for "count", we must
6533 * ensure that it doesn't blow up the
6534 * clp->cl_refcount. Throw a warning if we
6535 * start to approach INT_MAX here.
6537 WARN_ON_ONCE(count
== (INT_MAX
/ 2));
6544 spin_unlock(&clp
->cl_lock
);
6550 nfsd_collect_client_locks(struct nfs4_client
*clp
, struct list_head
*collect
,
6553 return nfsd_foreach_client_lock(clp
, max
, collect
, unhash_lock_stateid
);
6557 nfsd_print_client_locks(struct nfs4_client
*clp
)
6559 u64 count
= nfsd_foreach_client_lock(clp
, 0, NULL
, NULL
);
6560 nfsd_print_count(clp
, count
, "locked files");
6565 nfsd_inject_print_locks(void)
6567 struct nfs4_client
*clp
;
6569 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6572 if (!nfsd_netns_ready(nn
))
6575 spin_lock(&nn
->client_lock
);
6576 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
)
6577 count
+= nfsd_print_client_locks(clp
);
6578 spin_unlock(&nn
->client_lock
);
6584 nfsd_reap_locks(struct list_head
*reaplist
)
6586 struct nfs4_client
*clp
;
6587 struct nfs4_ol_stateid
*stp
, *next
;
6589 list_for_each_entry_safe(stp
, next
, reaplist
, st_locks
) {
6590 list_del_init(&stp
->st_locks
);
6591 clp
= stp
->st_stid
.sc_client
;
6592 nfs4_put_stid(&stp
->st_stid
);
6598 nfsd_inject_forget_client_locks(struct sockaddr_storage
*addr
, size_t addr_size
)
6600 unsigned int count
= 0;
6601 struct nfs4_client
*clp
;
6602 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6604 LIST_HEAD(reaplist
);
6606 if (!nfsd_netns_ready(nn
))
6609 spin_lock(&nn
->client_lock
);
6610 clp
= nfsd_find_client(addr
, addr_size
);
6612 count
= nfsd_collect_client_locks(clp
, &reaplist
, 0);
6613 spin_unlock(&nn
->client_lock
);
6614 nfsd_reap_locks(&reaplist
);
6619 nfsd_inject_forget_locks(u64 max
)
6622 struct nfs4_client
*clp
;
6623 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6625 LIST_HEAD(reaplist
);
6627 if (!nfsd_netns_ready(nn
))
6630 spin_lock(&nn
->client_lock
);
6631 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
) {
6632 count
+= nfsd_collect_client_locks(clp
, &reaplist
, max
- count
);
6633 if (max
!= 0 && count
>= max
)
6636 spin_unlock(&nn
->client_lock
);
6637 nfsd_reap_locks(&reaplist
);
6642 nfsd_foreach_client_openowner(struct nfs4_client
*clp
, u64 max
,
6643 struct list_head
*collect
,
6644 void (*func
)(struct nfs4_openowner
*))
6646 struct nfs4_openowner
*oop
, *next
;
6647 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6651 lockdep_assert_held(&nn
->client_lock
);
6653 spin_lock(&clp
->cl_lock
);
6654 list_for_each_entry_safe(oop
, next
, &clp
->cl_openowners
, oo_perclient
) {
6658 atomic_inc(&clp
->cl_refcount
);
6659 list_add(&oop
->oo_perclient
, collect
);
6664 * Despite the fact that these functions deal with
6665 * 64-bit integers for "count", we must ensure that
6666 * it doesn't blow up the clp->cl_refcount. Throw a
6667 * warning if we start to approach INT_MAX here.
6669 WARN_ON_ONCE(count
== (INT_MAX
/ 2));
6673 spin_unlock(&clp
->cl_lock
);
6679 nfsd_print_client_openowners(struct nfs4_client
*clp
)
6681 u64 count
= nfsd_foreach_client_openowner(clp
, 0, NULL
, NULL
);
6683 nfsd_print_count(clp
, count
, "openowners");
6688 nfsd_collect_client_openowners(struct nfs4_client
*clp
,
6689 struct list_head
*collect
, u64 max
)
6691 return nfsd_foreach_client_openowner(clp
, max
, collect
,
6692 unhash_openowner_locked
);
6696 nfsd_inject_print_openowners(void)
6698 struct nfs4_client
*clp
;
6700 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6703 if (!nfsd_netns_ready(nn
))
6706 spin_lock(&nn
->client_lock
);
6707 list_for_each_entry(clp
, &nn
->client_lru
, cl_lru
)
6708 count
+= nfsd_print_client_openowners(clp
);
6709 spin_unlock(&nn
->client_lock
);
6715 nfsd_reap_openowners(struct list_head
*reaplist
)
6717 struct nfs4_client
*clp
;
6718 struct nfs4_openowner
*oop
, *next
;
6720 list_for_each_entry_safe(oop
, next
, reaplist
, oo_perclient
) {
6721 list_del_init(&oop
->oo_perclient
);
6722 clp
= oop
->oo_owner
.so_client
;
6723 release_openowner(oop
);
6729 nfsd_inject_forget_client_openowners(struct sockaddr_storage
*addr
,
6732 unsigned int count
= 0;
6733 struct nfs4_client
*clp
;
6734 struct nfsd_net
*nn
= net_generic(current
->nsproxy
->net_ns
,
6736 LIST_HEAD(reaplist
);
6738 if (!nfsd_netns_ready(nn
))
6741 spin_lock(&nn
->client_lock
);
6742 clp
= nfsd_find_client(addr
, addr_size
);
6744 count
= nfsd_collect_client_openowners(clp
, &reaplist
, 0);
6745 spin_unlock(&nn
->client_lock
);
6746 nfsd_reap_openowners(&reaplist
);
u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

static u64
nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
			  struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}

static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}

u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}

static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && ++count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}

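/*
 * The nfsd_inject_* helpers above are compiled in only when
 * CONFIG_NFSD_FAULT_INJECTION is set; they are driven from the debugfs
 * fault-injection files (see fs/nfsd/fault_inject.c), which let an
 * administrator forcibly drop or recall client state when testing
 * client recovery.
 */
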
#endif /* CONFIG_NFSD_FAULT_INJECTION */

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

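/*
 * For example, with 4K pages (PAGE_SHIFT == 12) the shift used in
 * set_max_delegations() is 20 - 2 - 12 = 6: one delegation is allowed
 * per 64 free buffer pages (256K), i.e. 4 delegations per megabyte.
 */
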
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		return ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_cleanup_cred;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_cleanup_cred:
	cleanup_callback_cred();
	return ret;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}

	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		free_blocked_lock(nbl);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
	cleanup_callback_cred();
}

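/*
 * NFSv4.1 compounds may pass the special "current stateid" value to refer
 * to the stateid produced by an earlier op in the same compound.
 * put_stateid() below records the most recently returned stateid in the
 * compound state; get_stateid() substitutes it when an op carries the
 * special value.
 */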
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}