/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0, ~0}, ~0}
static const stateid_t one_stateid = {
        .si_generation = ~0,
        .si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
        /* all fields zero */
};
static const stateid_t currentstateid = {
        .si_generation = 1,
};
static const stateid_t close_stateid = {
        .si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

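/*
 * zero_stateid and one_stateid are the special all-zeroes/all-ones
 * stateids defined by the NFSv4 protocol; currentstateid and
 * close_stateid are reserved values used internally to stand for the
 * "current" stateid and a just-closed stateid.
 */
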
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table. In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
        OPEN_STATEID_MUTEX = 0,
        LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static bool is_session_dead(struct nfsd4_session *ses)
{
        return ses->se_flags & NFS4_SESSION_DEAD;
}

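/*
 * Mark a session dead so that no new references can be taken; callers
 * get nfserr_jukebox (a "retry later" error) while anyone other than
 * themselves still holds a reference on the session.
 */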
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
        if (atomic_read(&ses->se_ref) > ref_held_by_me)
                return nfserr_jukebox;
        ses->se_flags |= NFS4_SESSION_DEAD;
        return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
        return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (is_client_expired(clp))
                return nfserr_expired;
        atomic_inc(&clp->cl_rpc_users);
        return nfs_ok;
}

/* must be called under the client_lock */
static void
renew_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (is_client_expired(clp)) {
                WARN_ON(1);
                printk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
                return;
        }

        dprintk("renewing client (clientid %08x/%08x)\n",
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &nn->client_lru);
        clp->cl_time = ktime_get_boottime_seconds();
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (!atomic_dec_and_test(&clp->cl_rpc_users))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
        else
                wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
        else
                wake_up_all(&expiry_wq);
        spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
        __be32 status;

        if (is_session_dead(ses))
                return nfserr_badsession;
        status = get_client_locked(ses->se_client);
        if (status)
                return status;
        atomic_inc(&ses->se_ref);
        return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
                free_session(ses);
        put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        nfsd4_put_session_locked(ses);
        spin_unlock(&nn->client_lock);
}

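/*
 * Bookkeeping for blocked locks: when a lock request cannot be granted
 * immediately, an nfsd4_blocked_lock is queued on the lockowner so a
 * CB_NOTIFY_LOCK callback can be sent once the lock becomes available.
 */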
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
                        struct nfsd_net *nn)
{
        struct nfsd4_blocked_lock *cur, *found = NULL;

        spin_lock(&nn->blocked_locks_lock);
        list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
                if (fh_match(fh, &cur->nbl_fh)) {
                        list_del_init(&cur->nbl_list);
                        list_del_init(&cur->nbl_lru);
                        found = cur;
                        break;
                }
        }
        spin_unlock(&nn->blocked_locks_lock);
        if (found)
                locks_delete_block(&found->nbl_lock);
        return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
                        struct nfsd_net *nn)
{
        struct nfsd4_blocked_lock *nbl;

        nbl = find_blocked_lock(lo, fh, nn);
        if (!nbl) {
                nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
                if (nbl) {
                        fh_copy_shallow(&nbl->nbl_fh, fh);
                        locks_init_lock(&nbl->nbl_lock);
                        nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
                                        &nfsd4_cb_notify_lock_ops,
                                        NFSPROC4_CLNT_CB_NOTIFY_LOCK);
                }
        }
        return nbl;
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
        locks_delete_block(&nbl->nbl_lock);
        locks_release_private(&nbl->nbl_lock);
        kfree(nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
        struct nfsd4_blocked_lock *nbl;
        LIST_HEAD(reaplist);

        /* Dequeue all blocked locks */
        spin_lock(&nn->blocked_locks_lock);
        while (!list_empty(&lo->lo_blocked)) {
                nbl = list_first_entry(&lo->lo_blocked,
                                        struct nfsd4_blocked_lock,
                                        nbl_list);
                list_del_init(&nbl->nbl_list);
                list_move(&nbl->nbl_lru, &reaplist);
        }
        spin_unlock(&nn->blocked_locks_lock);

        /* Now free them */
        while (!list_empty(&reaplist)) {
                nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
                                        nbl_lru);
                list_del_init(&nbl->nbl_lru);
                free_blocked_lock(nbl);
        }
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
        struct nfsd4_blocked_lock *nbl = container_of(cb,
                                                struct nfsd4_blocked_lock, nbl_cb);
        locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
        /*
         * Since this is just an optimization, we don't try very hard if it
         * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
         * just quit trying on anything else.
         */
        switch (task->tk_status) {
        case -NFS4ERR_DELAY:
                rpc_delay(task, 1 * HZ);
                return 0;
        default:
                return 1;
        }
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
        struct nfsd4_blocked_lock *nbl = container_of(cb,
                                                struct nfsd4_blocked_lock, nbl_cb);

        free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
        .prepare        = nfsd4_cb_notify_lock_prepare,
        .done           = nfsd4_cb_notify_lock_done,
        .release        = nfsd4_cb_notify_lock_release,
};

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
        atomic_inc(&sop->so_count);
        return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
        return (sop->so_owner.len == owner->len) &&
                0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_stateowner *so;

        lockdep_assert_held(&clp->cl_lock);

        list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
                            so_strhash) {
                if (!so->so_is_open_owner)
                        continue;
                if (same_owner_str(so, &open->op_owner))
                        return openowner(nfs4_get_stateowner(so));
        }
        return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;

        spin_lock(&clp->cl_lock);
        oo = find_openstateowner_str_locked(hashval, open, clp);
        spin_unlock(&clp->cl_lock);
        return oo;
}

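/* A simple multiplicative hash (x = x * 37 + byte) over an opaque blob: */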
static unsigned int
opaque_hashval(const void *ptr, int nbytes)
{
        unsigned char *cptr = (unsigned char *) ptr;
        unsigned int x = 0;

        while (nbytes--) {
                x *= 37;
                x += *cptr++;
        }
        return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
        struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

        kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
        might_lock(&state_lock);

        if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del_rcu(&fi->fi_hash);
                spin_unlock(&state_lock);
                WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
                WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
                call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
        }
}

static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
        if (f->fi_fds[oflag])
                return nfsd_file_get(f->fi_fds[oflag]);
        return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
        struct nfsd_file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_WRONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
        struct nfsd_file *ret;

        spin_lock(&f->fi_lock);
        ret = find_writeable_file_locked(f);
        spin_unlock(&f->fi_lock);

        return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
        struct nfsd_file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_RDONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
        struct nfsd_file *ret;

        spin_lock(&f->fi_lock);
        ret = find_readable_file_locked(f);
        spin_unlock(&f->fi_lock);

        return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
        struct nfsd_file *ret;

        if (!f)
                return NULL;
        spin_lock(&f->fi_lock);
        ret = __nfs4_get_fd(f, O_RDWR);
        if (!ret) {
                ret = __nfs4_get_fd(f, O_WRONLY);
                if (!ret)
                        ret = __nfs4_get_fd(f, O_RDONLY);
        }
        spin_unlock(&f->fi_lock);
        return ret;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
        unsigned int ret;

        ret = opaque_hashval(ownername->data, ownername->len);
        return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
        return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
        return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                atomic_inc(&fp->fi_access[O_WRONLY]);
        if (access & NFS4_SHARE_ACCESS_READ)
                atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        /* Does this access mode make sense? */
        if (access & ~NFS4_SHARE_ACCESS_BOTH)
                return nfserr_inval;

        /* Does it conflict with a deny mode already set? */
        if ((access & fp->fi_share_deny) != 0)
                return nfserr_share_denied;

        __nfs4_file_get_access(fp, access);
        return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
        /* Common case is that there is no deny mode. */
        if (deny) {
                /* Does this deny mode make sense? */
                if (deny & ~NFS4_SHARE_DENY_BOTH)
                        return nfserr_inval;

                if ((deny & NFS4_SHARE_DENY_READ) &&
                    atomic_read(&fp->fi_access[O_RDONLY]))
                        return nfserr_share_denied;

                if ((deny & NFS4_SHARE_DENY_WRITE) &&
                    atomic_read(&fp->fi_access[O_WRONLY]))
                        return nfserr_share_denied;
        }
        return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
        might_lock(&fp->fi_lock);

        if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
                struct nfsd_file *f1 = NULL;
                struct nfsd_file *f2 = NULL;

                swap(f1, fp->fi_fds[oflag]);
                if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
                        swap(f2, fp->fi_fds[O_RDWR]);
                spin_unlock(&fp->fi_lock);
                if (f1)
                        nfsd_file_put(f1);
                if (f2)
                        nfsd_file_put(f2);
        }
}

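/*
 * Drop this file's access references for the given share-access mode;
 * NFS4_SHARE_ACCESS_BOTH drops both the read and the write reference.
 */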
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
        WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                __nfs4_file_put_access(fp, O_WRONLY);
        if (access & NFS4_SHARE_ACCESS_READ)
                __nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
        struct nfs4_clnt_odstate *co;

        co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
        if (co) {
                co->co_client = clp;
                refcount_set(&co->co_odcount, 1);
        }
        return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
        struct nfs4_file *fp = co->co_file;

        lockdep_assert_held(&fp->fi_lock);
        list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
        if (co)
                refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
        struct nfs4_file *fp;

        if (!co)
                return;

        fp = co->co_file;
        if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
                list_del(&co->co_perfile);
                spin_unlock(&fp->fi_lock);

                nfsd4_return_all_file_layouts(co->co_client, fp);
                kmem_cache_free(odstate_slab, co);
        }
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
        struct nfs4_clnt_odstate *co;
        struct nfs4_client *cl;

        if (!new)
                return NULL;

        cl = new->co_client;

        spin_lock(&fp->fi_lock);
        list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
                if (co->co_client == cl) {
                        get_clnt_odstate(co);
                        goto out;
                }
        }
        co = new;
        co->co_file = fp;
        hash_clnt_odstate_locked(new);
out:
        spin_unlock(&fp->fi_lock);
        return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
                                  void (*sc_free)(struct nfs4_stid *))
{
        struct nfs4_stid *stid;
        int new_id;

        stid = kmem_cache_zalloc(slab, GFP_KERNEL);
        if (!stid)
                return NULL;

        idr_preload(GFP_KERNEL);
        spin_lock(&cl->cl_lock);
        /* Reserving 0 for start of file in nfsdfs "states" file: */
        new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
        spin_unlock(&cl->cl_lock);
        idr_preload_end();
        if (new_id < 0)
                goto out_free;

        stid->sc_free = sc_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
        refcount_set(&stid->sc_count, 1);
        spin_lock_init(&stid->sc_lock);
        INIT_LIST_HEAD(&stid->sc_cp_list);

        /*
         * It shouldn't be a problem to reuse an opaque stateid value.
         * I don't think it is for 4.1.  But with 4.0 I worry that, for
         * example, a stray write retransmission could be accepted by
         * the server when it should have been rejected.  Therefore,
         * adopt a trick from the sctp code to attempt to maximize the
         * amount of time until an id is reused, by ensuring they always
         * "increase" (mod INT_MAX):
         */
        return stid;
out_free:
        kmem_cache_free(slab, stid);
        return NULL;
}

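/*
 * Note: idr_alloc_cyclic() hands out ids starting just past the last one
 * allocated, wrapping only when the id space is exhausted, which is what
 * provides the "always increase (mod INT_MAX)" behaviour relied on above.
 */
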
/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
                              unsigned char sc_type)
{
        int new_id;

        stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
        stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
        stid->sc_type = sc_type;

        idr_preload(GFP_KERNEL);
        spin_lock(&nn->s2s_cp_lock);
        new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
        stid->stid.si_opaque.so_id = new_id;
        spin_unlock(&nn->s2s_cp_lock);
        idr_preload_end();
        if (new_id < 0)
                return 0;
        return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
        return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
                                                     struct nfs4_stid *p_stid)
{
        struct nfs4_cpntf_state *cps;

        cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
        if (!cps)
                return NULL;
        cps->cpntf_time = ktime_get_boottime_seconds();
        refcount_set(&cps->cp_stateid.sc_count, 1);
        if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
                goto out_free;
        spin_lock(&nn->s2s_cp_lock);
        list_add(&cps->cp_list, &p_stid->sc_cp_list);
        spin_unlock(&nn->s2s_cp_lock);
        return cps;
out_free:
        kfree(cps);
        return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
        struct nfsd_net *nn;

        WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
        nn = net_generic(copy->cp_clp->net, nfsd_net_id);
        spin_lock(&nn->s2s_cp_lock);
        idr_remove(&nn->s2s_cp_stateids,
                   copy->cp_stateid.stid.si_opaque.so_id);
        spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
        struct nfs4_cpntf_state *cps;
        struct nfsd_net *nn;

        nn = net_generic(net, nfsd_net_id);
        spin_lock(&nn->s2s_cp_lock);
        while (!list_empty(&stid->sc_cp_list)) {
                cps = list_first_entry(&stid->sc_cp_list,
                                       struct nfs4_cpntf_state, cp_list);
                _free_cpntf_state_locked(nn, cps);
        }
        spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
        struct nfs4_stid *stid;

        stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
        if (!stid)
                return NULL;

        return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
        kmem_cache_free(deleg_slab, stid);
        atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
        int     entries, old_entries;
        time64_t swap_time;
        int     new; /* index into 'set' */
        DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

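/*
 * Example: a filehandle hashing to 0x00C0FFEE is recorded by setting bits
 * 0xEE (hash & 255), 0xFF ((hash >> 8) & 255) and 0xC0 ((hash >> 16) & 255)
 * in the current "new" filter.
 */
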
static int delegation_blocked(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        if (bd->entries == 0)
                return 0;
        if (ktime_get_seconds() - bd->swap_time > 30) {
                spin_lock(&blocked_delegations_lock);
                if (ktime_get_seconds() - bd->swap_time > 30) {
                        bd->entries -= bd->old_entries;
                        bd->old_entries = bd->entries;
                        memset(bd->set[bd->new], 0,
                               sizeof(bd->set[0]));
                        bd->new = 1 - bd->new;
                        bd->swap_time = ktime_get_seconds();
                }
                spin_unlock(&blocked_delegations_lock);
        }
        hash = jhash(&fh->fh_base, fh->fh_size, 0);
        if (test_bit(hash&255, bd->set[0]) &&
            test_bit((hash>>8)&255, bd->set[0]) &&
            test_bit((hash>>16)&255, bd->set[0]))
                return 1;

        if (test_bit(hash&255, bd->set[1]) &&
            test_bit((hash>>8)&255, bd->set[1]) &&
            test_bit((hash>>16)&255, bd->set[1]))
                return 1;

        return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        hash = jhash(&fh->fh_base, fh->fh_size, 0);

        spin_lock(&blocked_delegations_lock);
        __set_bit(hash&255, bd->set[bd->new]);
        __set_bit((hash>>8)&255, bd->set[bd->new]);
        __set_bit((hash>>16)&255, bd->set[bd->new]);
        if (bd->entries == 0)
                bd->swap_time = ktime_get_seconds();
        bd->entries += 1;
        spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
                 struct svc_fh *current_fh,
                 struct nfs4_clnt_odstate *odstate)
{
        struct nfs4_delegation *dp;
        long n;

        dprintk("NFSD alloc_init_deleg\n");
        n = atomic_long_inc_return(&num_delegations);
        if (n < 0 || n > max_delegations)
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;

        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
         * 0 anyway just for consistency and use 1:
         */
        dp->dl_stid.sc_stateid.si_generation = 1;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_clnt_odstate = odstate;
        get_clnt_odstate(odstate);
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
        dp->dl_retries = 1;
        nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
                      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
        get_nfs4_file(fp);
        dp->dl_stid.sc_file = fp;
        return dp;
out_dec:
        atomic_long_dec(&num_delegations);
        return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
        struct nfs4_file *fp = s->sc_file;
        struct nfs4_client *clp = s->sc_client;

        might_lock(&clp->cl_lock);

        if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        nfs4_free_cpntf_statelist(clp->net, s);
        spin_unlock(&clp->cl_lock);
        s->sc_free(s);
        if (fp)
                put_nfs4_file(fp);
}

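/*
 * Bump the stateid's generation for a seqid-mutating operation and copy
 * the result back to the client; the wrap to 1 skips generation 0, which
 * has the special "match any generation" meaning in stateid comparisons.
 */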
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
        stateid_t *src = &stid->sc_stateid;

        spin_lock(&stid->sc_lock);
        if (unlikely(++src->si_generation == 0))
                src->si_generation = 1;
        memcpy(dst, src, sizeof(*dst));
        spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
        struct nfsd_file *nf = NULL;

        spin_lock(&fp->fi_lock);
        if (--fp->fi_delegees == 0)
                swap(nf, fp->fi_deleg_file);
        spin_unlock(&fp->fi_lock);

        if (nf)
                nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
        struct nfs4_file *fp = dp->dl_stid.sc_file;
        struct nfsd_file *nf = fp->fi_deleg_file;

        WARN_ON_ONCE(!fp->fi_delegees);

        vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
        put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
        put_clnt_odstate(dp->dl_clnt_odstate);
        nfs4_unlock_deleg_lease(dp);
        nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
        s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
        struct nfs4_delegation *searchdp = NULL;
        struct nfs4_client *searchclp = NULL;

        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
                searchclp = searchdp->dl_stid.sc_client;
                if (clp == searchclp) {
                        return true;
                }
        }
        return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
        struct nfs4_client *clp = dp->dl_stid.sc_client;

        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        if (nfs4_delegation_exists(clp, fp))
                return -EAGAIN;
        refcount_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &clp->cl_delegations);
        return 0;
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
        struct nfs4_file *fp = dp->dl_stid.sc_file;

        lockdep_assert_held(&state_lock);

        if (list_empty(&dp->dl_perfile))
                return false;

        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
        /* Ensure that deleg break won't try to requeue it */
        ++dp->dl_time;
        spin_lock(&fp->fi_lock);
        list_del_init(&dp->dl_perclnt);
        list_del_init(&dp->dl_recall_lru);
        list_del_init(&dp->dl_perfile);
        spin_unlock(&fp->fi_lock);
        return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
        bool unhashed;

        spin_lock(&state_lock);
        unhashed = unhash_delegation_locked(dp);
        spin_unlock(&state_lock);
        if (unhashed)
                destroy_unhashed_deleg(dp);
}

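/*
 * NFSv4.1+ clients keep the revoked delegation on cl_revoked so they can
 * later retire it with FREE_STATEID; v4.0 has no such operation, so the
 * stateid is destroyed immediately.
 */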
static void revoke_delegation(struct nfs4_delegation *dp)
{
        struct nfs4_client *clp = dp->dl_stid.sc_client;

        WARN_ON(!list_empty(&dp->dl_recall_lru));

        if (clp->cl_minorversion) {
                dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
                refcount_inc(&dp->dl_stid.sc_count);
                spin_lock(&clp->cl_lock);
                list_add(&dp->dl_recall_lru, &clp->cl_revoked);
                spin_unlock(&clp->cl_lock);
        }
        destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
        return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
        return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *      OPEN allow read, deny write
 *      OPEN allow both, deny none
 *      DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
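/*
 * For example, a stateid whose st_access_bmap has bits 1 (READ) and 3
 * (BOTH) set maps back to 1 | 3 == NFS4_SHARE_ACCESS_BOTH below.
 */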
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
        int i;
        unsigned int access = 0;

        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        access |= i;
        }
        return access;
}

1174 set_access(u32 access
, struct nfs4_ol_stateid
*stp
)
1176 unsigned char mask
= 1 << access
;
1178 WARN_ON_ONCE(access
> NFS4_SHARE_ACCESS_BOTH
);
1179 stp
->st_access_bmap
|= mask
;
1182 /* clear share access for a given stateid */
1184 clear_access(u32 access
, struct nfs4_ol_stateid
*stp
)
1186 unsigned char mask
= 1 << access
;
1188 WARN_ON_ONCE(access
> NFS4_SHARE_ACCESS_BOTH
);
1189 stp
->st_access_bmap
&= ~mask
;
1192 /* test whether a given stateid has access */
1194 test_access(u32 access
, struct nfs4_ol_stateid
*stp
)
1196 unsigned char mask
= 1 << access
;
1198 return (bool)(stp
->st_access_bmap
& mask
);
1201 /* set share deny for a given stateid */
1203 set_deny(u32 deny
, struct nfs4_ol_stateid
*stp
)
1205 unsigned char mask
= 1 << deny
;
1207 WARN_ON_ONCE(deny
> NFS4_SHARE_DENY_BOTH
);
1208 stp
->st_deny_bmap
|= mask
;
1211 /* clear share deny for a given stateid */
1213 clear_deny(u32 deny
, struct nfs4_ol_stateid
*stp
)
1215 unsigned char mask
= 1 << deny
;
1217 WARN_ON_ONCE(deny
> NFS4_SHARE_DENY_BOTH
);
1218 stp
->st_deny_bmap
&= ~mask
;
1221 /* test whether a given stateid is denying specific access */
1223 test_deny(u32 deny
, struct nfs4_ol_stateid
*stp
)
1225 unsigned char mask
= 1 << deny
;
1227 return (bool)(stp
->st_deny_bmap
& mask
);
static int nfs4_access_to_omode(u32 access)
{
        switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
                return O_WRONLY;
        case NFS4_SHARE_ACCESS_BOTH:
                return O_RDWR;
        }
        WARN_ON_ONCE(1);
        return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
        struct nfs4_ol_stateid *stp;

        spin_lock(&fp->fi_lock);
        fp->fi_share_deny = 0;
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
                fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
        spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        int i;
        bool change = false;

        for (i = 1; i < 4; i++) {
                if ((i & deny) != i) {
                        change = true;
                        clear_deny(i, stp);
                }
        }

        /* Recalculate per-file deny mode if there was a change */
        if (change)
                recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
        int i;
        struct nfs4_file *fp = stp->st_stid.sc_file;

        if (fp && stp->st_deny_bmap != 0)
                recalculate_deny_mode(fp);

        for (i = 1; i < 4; i++) {
                if (test_access(i, stp))
                        nfs4_file_put_access(stp->st_stid.sc_file, i);
                clear_access(i, stp);
        }
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
        kfree(sop->so_owner.data);
        sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
        struct nfs4_client *clp = sop->so_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
                return;
        sop->so_ops->so_unhash(sop);
        spin_unlock(&clp->cl_lock);
        nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
        return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_file *fp = stp->st_stid.sc_file;

        lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

        if (list_empty(&stp->st_perfile))
                return false;

        spin_lock(&fp->fi_lock);
        list_del_init(&stp->st_perfile);
        spin_unlock(&fp->fi_lock);
        list_del(&stp->st_perstateowner);
        return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);

        put_clnt_odstate(stp->st_clnt_odstate);
        release_all_access(stp);
        if (stp->st_stateowner)
                nfs4_put_stateowner(stp->st_stateowner);
        kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);
        struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
        struct nfsd_file *nf;

        nf = find_any_file(stp->st_stid.sc_file);
        if (nf) {
                get_file(nf->nf_file);
                filp_close(nf->nf_file, (fl_owner_t)lo);
                nfsd_file_put(nf);
        }
        nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
                                       struct list_head *reaplist)
{
        struct nfs4_stid *s = &stp->st_stid;
        struct nfs4_client *clp = s->sc_client;

        lockdep_assert_held(&clp->cl_lock);

        WARN_ON_ONCE(!list_empty(&stp->st_locks));

        if (!refcount_dec_and_test(&s->sc_count)) {
                wake_up_all(&close_wq);
                return;
        }

        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
        lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

        if (!unhash_ol_stateid(stp))
                return false;
        list_del_init(&stp->st_locks);
        nfs4_unhash_stid(&stp->st_stid);
        return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_client *clp = stp->st_stid.sc_client;
        bool unhashed;

        spin_lock(&clp->cl_lock);
        unhashed = unhash_lock_stateid(stp);
        spin_unlock(&clp->cl_lock);
        if (unhashed)
                nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed:
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_file *fp;

        might_sleep();

        while (!list_empty(reaplist)) {
                stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
                                       st_locks);
                list_del(&stp->st_locks);
                fp = stp->st_stid.sc_file;
                stp->st_stid.sc_free(&stp->st_stid);
                if (fp)
                        put_nfs4_file(fp);
        }
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
                                       struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;

        lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

        while (!list_empty(&open_stp->st_locks)) {
                stp = list_entry(open_stp->st_locks.next,
                                struct nfs4_ol_stateid, st_locks);
                WARN_ON(!unhash_lock_stateid(stp));
                put_ol_stateid_locked(stp, reaplist);
        }
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
                                struct list_head *reaplist)
{
        lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

        if (!unhash_ol_stateid(stp))
                return false;
        release_open_stateid_locks(stp, reaplist);
        return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
        LIST_HEAD(reaplist);

        spin_lock(&stp->st_stid.sc_client->cl_lock);
        if (unhash_open_stateid(stp, &reaplist))
                put_ol_stateid_locked(stp, &reaplist);
        spin_unlock(&stp->st_stid.sc_client->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
        struct nfs4_client *clp = oo->oo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&oo->oo_owner.so_strhash);
        list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
        struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
                                          nfsd_net_id);
        struct nfs4_ol_stateid *s;

        spin_lock(&nn->client_lock);
        s = oo->oo_last_closed_stid;
        if (s) {
                list_del_init(&oo->oo_close_lru);
                oo->oo_last_closed_stid = NULL;
        }
        spin_unlock(&nn->client_lock);
        if (s)
                nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_client *clp = oo->oo_owner.so_client;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);

        spin_lock(&clp->cl_lock);
        unhash_openowner_locked(oo);
        while (!list_empty(&oo->oo_owner.so_stateids)) {
                stp = list_first_entry(&oo->oo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                if (unhash_open_stateid(stp, &reaplist))
                        put_ol_stateid_locked(stp, &reaplist);
        }
        spin_unlock(&clp->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
        release_last_closed_stateid(oo);
        nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

        return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
        struct nfs4_stateowner *so = cstate->replay_owner;

        if (nfserr == nfserr_replay_me)
                return;

        if (!seqid_mutating_err(ntohl(nfserr))) {
                nfsd4_cstate_clear_replay(cstate);
                return;
        }
        if (!so)
                return;
        if (so->so_is_open_owner)
                release_last_closed_stateid(openowner(so));
        so->so_seqid++;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
        int i;

        for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
                free_svc_cred(&ses->se_slots[i]->sl_cred);
                kfree(ses->se_slots[i]);
        }
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
        u32 size;

        if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
                size = 0;
        else
                size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
        return size + sizeof(struct nfsd4_slot);
}

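/*
 * With NFSD_MIN_HDR_SEQ_SZ == 80, a negotiated maxresp_cached of, say,
 * 4096 bytes costs 4096 - 80 + sizeof(struct nfsd4_slot) bytes per slot.
 */
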
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
        u32 slotsize = slot_bytes(ca);
        u32 num = ca->maxreqs;
        unsigned long avail, total_avail;
        unsigned int scale_factor;

        spin_lock(&nfsd_drc_lock);
        if (nfsd_drc_max_mem > nfsd_drc_mem_used)
                total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
        else
                /* We have handed out more space than we chose in
                 * set_max_drc() to allow.  That isn't really a
                 * problem as long as that doesn't make us think we
                 * have lots more due to integer overflow.
                 */
                total_avail = 0;
        avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
        /*
         * Never use more than a fraction of the remaining memory,
         * unless it's the only way to give this client a slot.
         * The chosen fraction is either 1/8 or 1/number of threads,
         * whichever is smaller.  This ensures there are adequate
         * slots to support multiple clients per thread.
         * Give the client one slot even if that would require
         * over-allocation--it is better than failure.
         */
        scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

        avail = clamp_t(unsigned long, avail, slotsize,
                        total_avail/scale_factor);
        num = min_t(int, num, avail / slotsize);
        num = max_t(int, num, 1);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);

        return num;
}

*ca
)
1670 int slotsize
= slot_bytes(ca
);
1672 spin_lock(&nfsd_drc_lock
);
1673 nfsd_drc_mem_used
-= slotsize
* ca
->maxreqs
;
1674 spin_unlock(&nfsd_drc_lock
);
1677 static struct nfsd4_session
*alloc_session(struct nfsd4_channel_attrs
*fattrs
,
1678 struct nfsd4_channel_attrs
*battrs
)
1680 int numslots
= fattrs
->maxreqs
;
1681 int slotsize
= slot_bytes(fattrs
);
1682 struct nfsd4_session
*new;
1685 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION
* sizeof(struct nfsd4_slot
*)
1686 + sizeof(struct nfsd4_session
) > PAGE_SIZE
);
1687 mem
= numslots
* sizeof(struct nfsd4_slot
*);
1689 new = kzalloc(sizeof(*new) + mem
, GFP_KERNEL
);
1692 /* allocate each struct nfsd4_slot and data cache in one piece */
1693 for (i
= 0; i
< numslots
; i
++) {
1694 new->se_slots
[i
] = kzalloc(slotsize
, GFP_KERNEL
);
1695 if (!new->se_slots
[i
])
1699 memcpy(&new->se_fchannel
, fattrs
, sizeof(struct nfsd4_channel_attrs
));
1700 memcpy(&new->se_bchannel
, battrs
, sizeof(struct nfsd4_channel_attrs
));
1705 kfree(new->se_slots
[i
]);
1710 static void free_conn(struct nfsd4_conn
*c
)
1712 svc_xprt_put(c
->cn_xprt
);
1716 static void nfsd4_conn_lost(struct svc_xpt_user
*u
)
1718 struct nfsd4_conn
*c
= container_of(u
, struct nfsd4_conn
, cn_xpt_user
);
1719 struct nfs4_client
*clp
= c
->cn_session
->se_client
;
1721 spin_lock(&clp
->cl_lock
);
1722 if (!list_empty(&c
->cn_persession
)) {
1723 list_del(&c
->cn_persession
);
1726 nfsd4_probe_callback(clp
);
1727 spin_unlock(&clp
->cl_lock
);
1730 static struct nfsd4_conn
*alloc_conn(struct svc_rqst
*rqstp
, u32 flags
)
1732 struct nfsd4_conn
*conn
;
1734 conn
= kmalloc(sizeof(struct nfsd4_conn
), GFP_KERNEL
);
1737 svc_xprt_get(rqstp
->rq_xprt
);
1738 conn
->cn_xprt
= rqstp
->rq_xprt
;
1739 conn
->cn_flags
= flags
;
1740 INIT_LIST_HEAD(&conn
->cn_xpt_user
.list
);
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        conn->cn_session = ses;
        list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;

        spin_lock(&clp->cl_lock);
        __nfsd4_hash_conn(conn, ses);
        spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
        return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        int ret;

        nfsd4_hash_conn(conn, ses);
        ret = nfsd4_register_conn(conn);
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
        /* We may have gained or lost a callback channel: */
        nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
        u32 dir = NFS4_CDFC4_FORE;

        if (cses->flags & SESSION4_BACK_CHAN)
                dir |= NFS4_CDFC4_BACK;
        return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
        struct nfs4_client *clp = s->se_client;
        struct nfsd4_conn *c;

        spin_lock(&clp->cl_lock);
        while (!list_empty(&s->se_conns)) {
                c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
                list_del_init(&c->cn_persession);
                spin_unlock(&clp->cl_lock);

                unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
                free_conn(c);

                spin_lock(&clp->cl_lock);
        }
        spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
        free_session_slots(ses);
        kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
        nfsd4_del_conns(ses);
        nfsd4_put_drc_mem(&ses->se_fchannel);
        __free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
        int idx;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        new->se_client = clp;
        gen_sessionid(new);

        INIT_LIST_HEAD(&new->se_conns);

        new->se_cb_seq_nr = 1;
        new->se_flags = cses->flags;
        new->se_cb_prog = cses->callback_prog;
        new->se_cb_sec = cses->cb_sec;
        atomic_set(&new->se_ref, 0);
        idx = hash_sessionid(&new->se_sessionid);
        list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
        spin_lock(&clp->cl_lock);
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);

        {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
                 * This is a little silly; with sessions there's no real
                 * use for the callback address.  Use the peer address
                 * as a reasonable default for now, but consider fixing
                 * the rpc client not to require an address in the
                 * future:
                 */
                rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
                clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
        }
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
        struct nfsd4_session *elem;
        int idx;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        dump_sessionid(__func__, sessionid);
        idx = hash_sessionid(sessionid);
        /* Search in the appropriate list */
        list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
                if (!memcmp(elem->se_sessionid.data, sessionid->data,
                            NFS4_MAX_SESSIONID_LEN)) {
                        return elem;
                }
        }

        dprintk("%s: session not found\n", __func__);
        return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
                __be32 *ret)
{
        struct nfsd4_session *session;
        __be32 status = nfserr_badsession;

        session = __find_in_sessionid_hashtbl(sessionid, net);
        if (!session)
                goto out;
        status = nfsd4_get_session_locked(session);
        if (status)
                session = NULL;
out:
        *ret = status;
        return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        list_del(&ses->se_hash);
        spin_lock(&ses->se_client->cl_lock);
        list_del(&ses->se_perclnt);
        spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static bool
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
        /*
         * We're assuming the clid was not given out from a boot
         * precisely 2^32 (about 136 years) before this one.  That seems
         * a safe assumption:
         */
        if (clid->cl_boot == (u32)nn->boot_time)
                return false;
        dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n",
                clid->cl_boot, clid->cl_id, nn->boot_time);
        return true;
}

/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
        struct nfs4_client *clp;
        int i;

        clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
        if (clp == NULL)
                return NULL;
        xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
        if (clp->cl_name.data == NULL)
                goto err_no_name;
        clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
                                                 sizeof(struct list_head),
                                                 GFP_KERNEL);
        if (!clp->cl_ownerstr_hashtbl)
                goto err_no_hashtbl;
        for (i = 0; i < OWNER_HASH_SIZE; i++)
                INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
        INIT_LIST_HEAD(&clp->cl_sessions);
        idr_init(&clp->cl_stateids);
        atomic_set(&clp->cl_rpc_users, 0);
        clp->cl_cb_state = NFSD4_CB_UNKNOWN;
        INIT_LIST_HEAD(&clp->cl_idhash);
        INIT_LIST_HEAD(&clp->cl_openowners);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_lru);
        INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
        INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
        INIT_LIST_HEAD(&clp->async_copies);
        spin_lock_init(&clp->async_lock);
        spin_lock_init(&clp->cl_lock);
        rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
err_no_hashtbl:
        kfree(clp->cl_name.data);
err_no_name:
        kmem_cache_free(client_slab, clp);
        return NULL;
}

static void __free_client(struct kref *k)
{
        struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
        struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);

        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_ownerstr_hashtbl);
        kfree(clp->cl_name.data);
        kfree(clp->cl_nii_domain.data);
        kfree(clp->cl_nii_name.data);
        idr_destroy(&clp->cl_stateids);
        kmem_cache_free(client_slab, clp);
}

static void drop_client(struct nfs4_client *clp)
{
        kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
}

static void
free_client(struct nfs4_client *clp)
{
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
                                se_perclnt);
                list_del(&ses->se_perclnt);
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
        rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        if (clp->cl_nfsd_dentry) {
                nfsd_client_rmdir(clp->cl_nfsd_dentry);
                clp->cl_nfsd_dentry = NULL;
                wake_up_all(&expiry_wq);
        }
        drop_client(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
        struct nfsd4_session *ses;

        lockdep_assert_held(&nn->client_lock);

        /* Mark the client as expired! */
        clp->cl_time = 0;
        /* Make it invisible */
        if (!list_empty(&clp->cl_idhash)) {
                list_del_init(&clp->cl_idhash);
                if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
                        rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
                else
                        rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        }
        list_del_init(&clp->cl_lru);
        spin_lock(&clp->cl_lock);
        list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
                list_del_init(&ses->se_hash);
        spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        unhash_client_locked(clp);
        spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
        if (atomic_read(&clp->cl_rpc_users))
                return nfserr_jukebox;
        unhash_client_locked(clp);
        return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
        int i;
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);
        spin_lock(&state_lock);
        while (!list_empty(&clp->cl_delegations)) {
                dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
                WARN_ON(!unhash_delegation_locked(dp));
                list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                destroy_unhashed_deleg(dp);
        }
        while (!list_empty(&clp->cl_revoked)) {
                dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_openowners)) {
                oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
                nfs4_get_stateowner(&oo->oo_owner);
                release_openowner(oo);
        }
        for (i = 0; i < OWNER_HASH_SIZE; i++) {
                struct nfs4_stateowner *so, *tmp;

                list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
                                         so_strhash) {
                        /* Should be no openowners at this point */
                        WARN_ON_ONCE(so->so_is_open_owner);
                        remove_blocked_locks(lockowner(so));
                }
        }
        nfsd4_return_all_client_layouts(clp);
        nfsd4_shutdown_copy(clp);
        nfsd4_shutdown_callback(clp);
        if (clp->cl_cb_conn.cb_xprt)
                svc_xprt_put(clp->cl_cb_conn.cb_xprt);
        free_client(clp);
        wake_up_all(&expiry_wq);
}

static void
destroy_client(struct nfs4_client *clp)
{
        unhash_client(clp);
        __destroy_client(clp);
}

static void inc_reclaim_complete(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (!nn->track_reclaim_completes)
                return;
        if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
                return;
        if (atomic_inc_return(&nn->nr_reclaim_complete) ==
                        nn->reclaim_str_hashtbl_size) {
                printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
                                clp->net->ns.inum);
                nfsd4_end_grace(nn);
        }
}

static void expire_client(struct nfs4_client *clp)
{
        unhash_client(clp);
        nfsd4_client_record_remove(clp);
        __destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
        memcpy(target->cl_verifier.data, source->data,
                        sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
        target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
        target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
        target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
        target->cr_raw_principal = kstrdup(source->cr_raw_principal,
                                                                GFP_KERNEL);
        target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
        if ((source->cr_principal && !target->cr_principal) ||
            (source->cr_raw_principal && !target->cr_raw_principal) ||
            (source->cr_targ_princ && !target->cr_targ_princ))
                return -ENOMEM;

        target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
        target->cr_gss_mech = source->cr_gss_mech;
        if (source->cr_gss_mech)
                gss_mech_get(source->cr_gss_mech);
        return 0;
}

static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
        if (o1->len < o2->len)
                return -1;
        if (o1->len > o2->len)
                return 1;
        return memcmp(o1->data, o2->data, o1->len);
}

static bool
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
        return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static bool
same_clid(clientid_t *cl1, clientid_t *cl2)
{
        return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
        int i;

        if (g1->ngroups != g2->ngroups)
                return false;
        for (i = 0; i < g1->ngroups; i++)
                if (!gid_eq(g1->gid[i], g2->gid[i]))
                        return false;
        return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}
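
/*
 * Look up a stateid of one of the given types under cl_lock; on a
 * typemask match, sc_count is bumped so the caller owns a reference.
 */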
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}
static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
{
	struct nfsdfs_client *nc;
	nc = get_nfsdfs_client(inode);

	if (!nc)
		return NULL;
	return container_of(nc, struct nfs4_client, cl_nfsdfs);
}

static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
	seq_printf(m, "\"");
	seq_escape_mem_ascii(m, data, len);
	seq_printf(m, "\"");
}
static int client_info_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct nfs4_client *clp;
	u64 clid;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;
	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
	seq_printf(m, "clientid: 0x%llx\n", clid);
	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
	seq_printf(m, "name: ");
	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
	if (clp->cl_nii_domain.data) {
		seq_printf(m, "Implementation domain: ");
		seq_quote_mem(m, clp->cl_nii_domain.data,
					clp->cl_nii_domain.len);
		seq_printf(m, "\nImplementation name: ");
		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
	}
	drop_client(clp);

	return 0;
}
static int client_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, client_info_show, inode);
}

static const struct file_operations client_info_fops = {
	.open		= client_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
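
/*
 * seq_file iterator for the per-client "states" file: walks every
 * stateid in the client's cl_stateids IDR, holding cl_lock from
 * states_start() until states_stop().
 */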
static void *states_start(struct seq_file *s, loff_t *pos)
	__acquires(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	spin_lock(&clp->cl_lock);
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void *states_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	id = *pos;
	id++;
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;

	spin_unlock(&clp->cl_lock);
}
static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
	struct inode *inode = f->nf_inode;

	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
					MAJOR(inode->i_sb->s_dev),
					MINOR(inode->i_sb->s_dev),
					inode->i_ino);
}

static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
	seq_printf(s, "owner: ");
	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;
	unsigned int access, deny;

	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
		return 0; /* XXX: or SEQ_SKIP? */
	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);

	seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);

	access = bmap_to_share_mode(ols->st_access_bmap);
	deny   = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: %s%s, ",
		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: %s%s, ",
		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}
static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;

	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	file = find_any_file(nf);

	seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);

	/*
	 * Note: a lock stateid isn't really the same thing as a lock,
	 * it's the locking state held by one owner on a file, and there
	 * may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
	 */

	nfs4_show_superblock(s, file);
	/* XXX: open stateid? */
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
	nfsd_file_put(file);

	return 0;
}
static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_delegation *ds;
	struct nfs4_file *nf;
	struct nfsd_file *file;

	ds = delegstateid(st);
	nf = st->sc_file;
	file = nf->fi_deleg_file;

	seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);

	/* Kinda dead code as long as we only support read delegs: */
	seq_printf(s, "access: %s, ",
		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");

	/* XXX: lease time, whether it's being recalled. */

	nfs4_show_superblock(s, file);
	seq_printf(s, " }\n");

	return 0;
}

static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_layout_stateid *ls;
	struct nfsd_file *file;

	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
	file = ls->ls_file;

	seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);

	/* XXX: What else would be useful? */

	nfs4_show_superblock(s, file);
	seq_printf(s, " }\n");

	return 0;
}
static int states_show(struct seq_file *s, void *v)
{
	struct nfs4_stid *st = v;

	switch (st->sc_type) {
	case NFS4_OPEN_STID:
		return nfs4_show_open(s, st);
	case NFS4_LOCK_STID:
		return nfs4_show_lock(s, st);
	case NFS4_DELEG_STID:
		return nfs4_show_deleg(s, st);
	case NFS4_LAYOUT_STID:
		return nfs4_show_layout(s, st);
	default:
		return 0; /* XXX: or SEQ_SKIP? */
	}
	/* XXX: copy stateids? */
}

static struct seq_operations states_seq_ops = {
	.start = states_start,
	.next = states_next,
	.stop = states_stop,
	.show = states_show,
};
static int client_states_open(struct inode *inode, struct file *file)
{
	struct seq_file *s;
	struct nfs4_client *clp;
	int ret;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;

	ret = seq_open(file, &states_seq_ops);
	if (ret)
		return ret;
	s = file->private_data;
	s->private = clp;
	return 0;
}

static int client_opens_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct nfs4_client *clp = m->private;

	/* XXX: alternatively, we could get/drop in seq start/stop */
	drop_client(clp);
	return 0;
}

static const struct file_operations client_states_fops = {
	.open		= client_states_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= client_opens_release,
};
/*
 * Normally we refuse to destroy clients that are in use, but here the
 * administrator is telling us to just do it.  We also want to wait
 * so the caller has a guarantee that the client's locks are gone by
 * the time the write returns:
 */
static void force_expire_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	bool already_expired;

	spin_lock(&clp->cl_lock);
	clp->cl_time = 0;
	spin_unlock(&clp->cl_lock);

	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
	spin_lock(&nn->client_lock);
	already_expired = list_empty(&clp->cl_lru);
	if (!already_expired)
		unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);

	if (!already_expired)
		expire_client(clp);
	else
		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
}
static ssize_t client_ctl_write(struct file *file, const char __user *buf,
				size_t size, loff_t *pos)
{
	char *data;
	struct nfs4_client *clp;

	data = simple_transaction_get(file, buf, size);
	if (IS_ERR(data))
		return PTR_ERR(data);
	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
		return -EINVAL;
	clp = get_nfsdfs_clp(file_inode(file));
	if (!clp)
		return -ENXIO;
	force_expire_client(clp);
	drop_client(clp);
	return 7;
}

static const struct file_operations client_ctl_fops = {
	.write		= client_ctl_write,
	.release	= simple_transaction_release,
};

static const struct tree_descr client_files[] = {
	[0] = {"info", &client_info_fops, S_IRUSR},
	[1] = {"states", &client_states_fops, S_IRUSR},
	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
	[3] = {""},
};
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	gen_clid(clp, nn);
	kref_init(&clp->cl_nfsdfs.cl_ref);
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = ktime_get_boottime_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
	clp->cl_cb_session = NULL;
	clp->net = net;
	clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
			clp->cl_clientid.cl_id - nn->clientid_base,
			client_files);
	if (!clp->cl_nfsd_dentry) {
		free_client(clp);
		return NULL;
	}
	return clp;
}
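
/*
 * Insert a client into one of the per-net name trees (conf_name_tree or
 * unconf_name_tree), ordered by compare_blob() on cl_name; callers hold
 * nn->client_lock.
 */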
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}
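
/*
 * The "sessions" flag selects between 4.1+ and 4.0 clients: a clientid
 * belonging to a sessions-capable client must not be visible to the
 * non-sessions lookup, and vice versa.
 */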
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk("NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}
static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions)
		|| !list_empty(&clp->async_copies);
}
static __be32 copy_impl_id(struct nfs4_client *clp,
				struct nfsd4_exchange_id *exid)
{
	if (!exid->nii_domain.data)
		return 0;
	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
	if (!clp->cl_nii_domain.data)
		return nfserr_jukebox;
	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
	if (!clp->cl_nii_name.data)
		return nfserr_jukebox;
	clp->cl_nii_time = exid->nii_time;
	return 0;
}
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;
	status = copy_impl_id(new, exid);
	if (status)
		goto out_nolock;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through */
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
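
/*
 * Slot seqid state machine: seqid == slot_seqid + 1 is a new request;
 * seqid == slot_seqid is a replay (or, while the slot is still in use,
 * a retransmission we can only ask the client to retry); anything else
 * is misordered.
 */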
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
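
/*
 * Lower bounds on the fore-channel request and reply sizes: the
 * XDR-encoded size of a compound carrying nothing but a SEQUENCE
 * operation.
 */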
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
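
/*
 * Clamp the client's proposed fore-channel attributes to what this
 * server can actually provide; anything below the floor is toosmall.
 */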
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 * Note that we always allow at least one slot, because our
	 * accounting is soft and provides no guarantees either way.
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);

	return nfs_ok;
}
/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}
static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
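
/* Caller must hold the client's cl_lock. */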
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}
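
/*
 * Sanity-check that a request claiming to be a replay really does match
 * what we cached in the slot, so a false retry gets
 * nfserr_seq_false_retry rather than somebody else's cached reply.
 */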
static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}
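
/*
 * SEQUENCE is where session, slot, and replay-cache processing happens;
 * it must be the first operation of any 4.1 compound.
 */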
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			&cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client?
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
	inc_reclaim_complete(cstate->session->se_client);
out:
	return status;
}
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		/* case 1: probable callback update */
		copy_clid(new, conf);
		gen_confirm(new, nn);
	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
		;
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			/* case 2: probable retransmit */
			status = nfs_ok;
		} else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}

int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			refcount_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}
static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case 0:
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}
static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	for (;;) {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		nfs4_put_stid(&stp->st_stid);
	}
	return stp;
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
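
/*
 * Hash a new open stateid for this file and owner, or return an
 * existing one found under the locks; on a race with CLOSE the lookup
 * is retried.
 */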
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = ktime_get_boottime_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
				lockdep_is_held(&state_lock)) {
		if (fh_match(&fp->fi_fhandle, fh)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	return fp;
}
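
/*
 * Lockless RCU lookup first; only on a miss do we take state_lock,
 * re-check, and insert the caller's preallocated "new" file.
 */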
static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	if (fp)
		return fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh, hashval);
	if (likely(fp == NULL)) {
		nfsd4_init_file(fh, hashval, new);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}
4401 * Called to check deny when READ with all zero stateid or
4402 * WRITE with all zero or all one stateid
4405 nfs4_share_conflict(struct svc_fh
*current_fh
, unsigned int deny_type
)
4407 struct nfs4_file
*fp
;
4408 __be32 ret
= nfs_ok
;
4410 fp
= find_file(¤t_fh
->fh_handle
);
4413 /* Check for conflicting share reservations */
4414 spin_lock(&fp
->fi_lock
);
4415 if (fp
->fi_share_deny
& deny_type
)
4416 ret
= nfserr_locked
;
4417 spin_unlock(&fp
->fi_lock
);
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = ktime_get_boottime_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_DELAY:
		rpc_delay(task, 2 * HZ);
		return 0;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		/* fallthrough */
	default:
		return 1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * i_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
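
/*
 * Editorial illustration (hypothetical values, not part of the original
 * source): with so->so_seqid == 7, a v4.0 client retransmitting its previous
 * request sends seqid 6 and gets nfserr_replay_me (answered from the replay
 * cache), the next new request sends seqid 7 and proceeds, and any other
 * value is nfserr_bad_seqid.  Sessions (v4.1+) carry their own replay
 * protection, so the check is skipped entirely there.
 */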
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn,
		bool sessions)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn, false);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}
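
/*
 * Editorial note: share_access_to_flags() maps only an exact
 * NFS4_SHARE_ACCESS_READ to RD_STATE; both NFS4_SHARE_ACCESS_WRITE and
 * NFS4_SHARE_ACCESS_BOTH fall through to WR_STATE, the stricter mode to
 * test a delegation against.
 */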
static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}
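
/*
 * Editorial illustration: NFS4_SHARE_ACCESS_BOTH sets both share bits, so
 * nfs4_access_to_access(NFS4_SHARE_ACCESS_BOTH) yields
 * NFSD_MAY_READ | NFSD_MAY_WRITE, which is then fed to nfsd_file_acquire().
 */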
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct nfsd_file *nf = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = nf;
			nf = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (nf)
		nfsd_file_put(nf);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)dp;
	fl->fl_pid = current->tgid;
	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
	return fl;
}
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	nf = find_readable_file(fp);
	if (!nf) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		return ERR_PTR(-EBADF);
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = nf;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		nf = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (nf)
		nfsd_file_put(nf);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, fh, odstate);
	if (!dp)
		goto out_delegees;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		goto out_clnt_odstate;

	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (fp->fi_had_conflict)
		status = -EAGAIN;
	else
		status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		goto out_unlock;

	return dp;
out_unlock:
	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_stid(&dp->dl_stid);
out_delegees:
	put_deleg_file(fp);
	return ERR_PTR(status);
}
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			if (!cb_up)
				open->op_recall = 1;
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs, *and* until
			 * NLM locks have all been reclaimed:
			 */
			if (locks_in_grace(clp->net))
				goto out_no_deleg;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out_no_deleg;
			/*
			 * Also, if the file was opened for write or
			 * create, there's a good chance the client's
			 * about to write to it, resulting in an
			 * immediate recall (since we don't support
			 * write delegations):
			 */
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				goto out_no_deleg;
			if (open->op_create == NFS4_OPEN_CREATE)
				goto out_no_deleg;
			break;
		default:
			goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp)) {
		status = PTR_ERR(dp);
		goto out_no_deleg;
	}

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;
	bool new_stp = false;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_and_lock_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	if (!stp) {
		stp = init_open_stateid(fp, open);
		if (!open->op_stp)
			new_stp = true;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 *
	 * stp is already locked.
	 */
	if (!new_stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			stp->st_stid.sc_type = NFS4_CLOSED_STID;
			release_open_stateid(stp);
			mutex_unlock(&stp->st_mutex);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}

	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		kmem_cache_free(file_slab, open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
	if (open->op_odstate)
		kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	clientid_t *clid = &u->renew;
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}
void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim. But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}

/*
 * If we've waited a lease period but there are still clients trying to
 * reclaim, wait a little longer to give them a chance to finish.
 */
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
	time64_t double_grace_period_end = nn->boot_time +
					   2 * nn->nfsd4_lease;

	if (nn->track_reclaim_completes &&
			atomic_read(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size)
		return false;
	if (!nn->somebody_reclaimed)
		return false;
	nn->somebody_reclaimed = false;
	/*
	 * If we've given them *two* lease times to reclaim, and they're
	 * still not done, give up:
	 */
	if (ktime_get_boottime_seconds() > double_grace_period_end)
		return false;
	return true;
}
static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfsd4_blocked_lock *nbl;
	struct list_head *pos, *next, reaplist;
	time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
	time64_t t, new_timeo = nn->nfsd4_lease;
	struct nfs4_cpntf_state *cps;
	copy_stateid_t *cps_t;
	int i;

	dprintk("NFSD: laundromat service - starting\n");

	if (clients_still_reclaiming(nn)) {
		new_timeo = 0;
		goto out;
	}
	dprintk("NFSD: end of grace period\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);

	spin_lock(&nn->s2s_cp_lock);
	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
				cps->cpntf_time > cutoff)
			_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);

	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (clp->cl_time > cutoff) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (dp->dl_time > cutoff) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (oo->oo_time > cutoff) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	/*
	 * It's possible for a client to try and acquire an already held lock
	 * that is being held for a long time, and then lose interest in it.
	 * So, we clean out any un-revisited request after a lease period
	 * under the assumption that the client is no longer interested.
	 *
	 * RFC5661, sec. 9.6 states that the client must not rely on getting
	 * notifications and must continue to poll for locks, even when the
	 * server supports them. Thus this shouldn't lead to clients blocking
	 * indefinitely once the lock does become free.
	 */
	BUG_ON(!list_empty(&reaplist));
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&nn->blocked_locks_lru)) {
		nbl = list_first_entry(&nn->blocked_locks_lru,
					struct nfsd4_blocked_lock, nbl_lru);
		if (nbl->nbl_time > cutoff) {
			t = nbl->nbl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&nbl->nbl_lru, &reaplist);
		list_del_init(&nbl->nbl_list);
	}
	spin_unlock(&nn->blocked_locks_lock);

	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
out:
	new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time64_t t;
	struct delayed_work *dwork = to_delayed_work(laundry);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
{
	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
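
/*
 * Editorial illustration: a lock stateid is checked against its parent
 * open stateid (st_openstp), so a WR_STATE test against a lock whose
 * parent OPEN was read-only fails with nfserr_openmode even though the
 * lock stateid itself carries no access bits.
 */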
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (opens_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return opens_in_grace(net) && mandatory_lock(inode);
}
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (nfsd4_stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client. For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight. The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
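
/*
 * Editorial illustration (hypothetical values): if our copy of the stateid
 * has si_generation == 5, an incoming generation 5 is nfs_ok, 6 (the
 * "future") is nfserr_bad_stateid, 4 (the "past", e.g. reordered in flight)
 * is nfserr_old_stateid, and 0 on a v4.1+ session is nfs_ok, since sessions
 * treat generation 0 as "use the most recent".
 */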
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
	__be32 ret;

	spin_lock(&s->sc_lock);
	ret = nfsd4_verify_open_stid(s);
	if (ret == nfs_ok)
		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
	spin_unlock(&s->sc_lock);
	return ret;
}

static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
	if (ols->st_stateowner->so_is_open_owner &&
	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return status;
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
		return status;
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;

	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;
	bool return_revoked = false;

	/*
	 * only return revoked delegations if explicitly asked.
	 * otherwise we report revoked or bad_stateid status.
	 */
	if (typemask & NFS4_REVOKED_DELEG_STID)
		return_revoked = true;
	else if (typemask & NFS4_DELEG_STID)
		typemask |= NFS4_REVOKED_DELEG_STID;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
		CLOSE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
				 false);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
		nfs4_put_stid(*s);
		if (cstate->minorversion)
			return nfserr_deleg_revoked;
		return nfserr_bad_stateid;
	}
	return nfs_ok;
}
static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
	if (!s)
		return NULL;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
			return NULL;
		return nfsd_file_get(s->sc_file->fi_deleg_file);
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		if (flags & RD_STATE)
			return find_readable_file(s->sc_file);
		else
			return find_writeable_file(s->sc_file);
	}

	return NULL;
}

static __be32
nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
{
	__be32 status;

	status = nfsd4_check_openowner_confirmed(ols);
	if (status)
		return status;
	return nfs4_check_openmode(ols, flags);
}

static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
		struct nfsd_file **nfp, int flags)
{
	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
	struct nfsd_file *nf;
	__be32 status;

	nf = nfs4_find_file(s, flags);
	if (nf) {
		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				acc | NFSD_MAY_OWNER_OVERRIDE);
		if (status) {
			nfsd_file_put(nf);
			goto out;
		}
	} else {
		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
		if (status)
			return status;
	}
	*nfp = nf;
out:
	return status;
}
static void
_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
	if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
		return;
	list_del(&cps->cp_list);
	idr_remove(&nn->s2s_cp_stateids,
		   cps->cp_stateid.stid.si_opaque.so_id);
	kfree(cps);
}

/*
 * A READ from an inter server to server COPY will have a
 * copy stateid. Look up the copy notify stateid from the
 * idr structure and take a reference on it.
 */
__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			  struct nfs4_client *clp,
			  struct nfs4_cpntf_state **cps)
{
	copy_stateid_t *cps_t;
	struct nfs4_cpntf_state *state = NULL;

	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
		return nfserr_bad_stateid;
	spin_lock(&nn->s2s_cp_lock);
	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
	if (cps_t) {
		state = container_of(cps_t, struct nfs4_cpntf_state,
				     cp_stateid);
		if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
			state = NULL;
			goto unlock;
		}
		if (!clp)
			refcount_inc(&state->cp_stateid.sc_count);
		else
			_free_cpntf_state_locked(nn, state);
	}
unlock:
	spin_unlock(&nn->s2s_cp_lock);
	if (!state)
		return nfserr_bad_stateid;
	if (!clp && state)
		*cps = state;
	return 0;
}
static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			       struct nfs4_stid **stid)
{
	__be32 status;
	struct nfs4_cpntf_state *cps = NULL;
	struct nfsd4_compound_state cstate;

	status = manage_cpntf_state(nn, st, NULL, &cps);
	if (status)
		return status;

	cps->cpntf_time = ktime_get_boottime_seconds();
	memset(&cstate, 0, sizeof(cstate));
	status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
	if (status)
		goto out;
	status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				stid, nn);
	put_client_renew(cstate.clp);
out:
	nfs4_put_cpntf_state(nn, cps);
	return status;
}

void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	spin_lock(&nn->s2s_cp_lock);
	_free_cpntf_state_locked(nn, cps);
	spin_unlock(&nn->s2s_cp_lock);
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
		stateid_t *stateid, int flags, struct nfsd_file **nfp,
		struct nfs4_stid **cstid)
{
	struct inode *ino = d_inode(fhp->fh_dentry);
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct nfs4_stid *s = NULL;
	__be32 status;

	if (nfp)
		*nfp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
		status = check_special_stateids(net, fhp, stateid, flags);
		goto done;
	}

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status == nfserr_bad_stateid)
		status = find_cpntf_state(nn, stateid, &s);
	if (status)
		return status;
	status = nfsd4_stid_check_stateid_generation(stateid, s,
			nfsd4_has_session(cstate));
	if (status)
		goto out;

	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs4_check_delegmode(delegstateid(s), flags);
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		status = nfs4_check_olstateid(openlockstateid(s), flags);
		break;
	default:
		status = nfserr_bad_stateid;
		break;
	}
	if (status)
		goto out;
	status = nfs4_check_fh(fhp, s);

done:
	if (status == nfs_ok && nfp)
		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
	if (s) {
		if (!status && cstid)
			*cstid = s;
		else
			nfs4_put_stid(s);
	}
	return status;
}
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	ret = nfsd4_lock_ol_stateid(stp);
	if (ret)
		goto out_put_stid;

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
out_put_stid:
	nfs4_put_stid(s);
	return ret;
}
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	spin_lock(&s->sc_lock);
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		spin_unlock(&s->sc_lock);
		refcount_inc(&s->sc_count);
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(stateid, s);
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		spin_unlock(&s->sc_lock);
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
	spin_unlock(&s->sc_lock);
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
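
/*
 * Editorial note: setlkflg() reduces the four NFSv4 lock types to the two
 * openmode flags: NFS4_READ_LT and NFS4_READW_LT become RD_STATE, while
 * NFS4_WRITE_LT and NFS4_WRITEW_LT become WR_STATE.
 */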
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}

static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		mutex_unlock(&stp->st_mutex);
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   union nfsd4_op_u *u)
{
	struct nfsd4_open_confirm *oc = &u->open_confirm;
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
		mutex_unlock(&stp->st_mutex);
		goto put_stateid;
	}
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_open_downgrade *od = &u->open_downgrade;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}

/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_close *close = &u->close;
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;

	stp->st_stid.sc_type = NFS4_CLOSED_STID;

	/*
	 * Technically we don't _really_ have to increment or copy it, since
	 * it should just be gone after this operation and we clobber the
	 * copied value below, but we continue to do so here just to ensure
	 * that racing ops see that there was a state change.
	 */
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* v4.1+ suggests that we send a special stateid in here, since the
	 * clients should just ignore this anyway. Since this is not useful
	 * for v4.0 clients either, we set it to the special close_stateid
	 * universally.
	 *
	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
	 */
	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_delegreturn *dr = &u->delegreturn;
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
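
/*
 * Editorial illustration (hypothetical values): last_byte_offset(100, 10)
 * is 109, and last_byte_offset(100, 0) falls back to NFS4_MAX_UINT64, i.e.
 * a whole-file range; end_offset() behaves the same but returns the first
 * byte *past* the range.  nfs4_transform_lock_offset() then clamps any
 * result above 2^63 - 1 (negative once stored in the signed fl_start /
 * fl_end fields) to OFFSET_MAX, the largest offset the VFS can represent.
 */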
static fl_owner_t
nfsd4_fl_get_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	nfs4_get_stateowner(&lo->lo_owner);
	return owner;
}

static void
nfsd4_fl_put_owner(fl_owner_t owner)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;

	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
}

static void
nfsd4_lm_notify(struct file_lock *fl)
{
	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
	struct net			*net = lo->lo_owner.so_client->net;
	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
	struct nfsd4_blocked_lock	*nbl = container_of(fl,
						struct nfsd4_blocked_lock, nbl_lock);
	bool queue = false;

	/* An empty list means that something else is going to be using it */
	spin_lock(&nn->blocked_locks_lock);
	if (!list_empty(&nbl->nbl_list)) {
		list_del_init(&nbl->nbl_list);
		list_del_init(&nbl->nbl_lru);
		queue = true;
	}
	spin_unlock(&nn->blocked_locks_lock);

	if (queue)
		nfsd4_run_cb(&nbl->nbl_cb);
}

static const struct lock_manager_operations nfsd_posix_mng_ops  = {
	.lm_notify = nfsd4_lm_notify,
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
						GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, owner);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_blocked);
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_stateowner(&lo->lo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
static struct nfs4_ol_stateid *
find_lock_stateid(const struct nfs4_lockowner *lo,
		  const struct nfs4_ol_stateid *ost)
{
	struct nfs4_ol_stateid *lst;

	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);

	/* If ost is not hashed, ost->st_locks will not be valid */
	if (!nfs4_ol_stateid_unhashed(ost))
		list_for_each_entry(lst, &ost->st_locks, st_locks) {
			if (lst->st_stateowner == &lo->lo_owner) {
				refcount_inc(&lst->st_stid.sc_count);
				return lst;
			}
		}
	return NULL;
}

static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *retstp;

	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
	spin_lock(&clp->cl_lock);
	if (nfs4_ol_stateid_unhashed(open_stp))
		goto out_close;
	retstp = find_lock_stateid(lo, open_stp);
	if (retstp)
		goto out_found;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&clp->cl_lock);
	return stp;
out_found:
	spin_unlock(&clp->cl_lock);
	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
		nfs4_put_stid(&retstp->st_stid);
		goto retry;
	}
	/* To keep mutex tracking happy */
	mutex_unlock(&stp->st_mutex);
	return retstp;
out_close:
	spin_unlock(&clp->cl_lock);
	mutex_unlock(&stp->st_mutex);
	return NULL;
}
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	*new = false;
	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, ost);
	spin_unlock(&clp->cl_lock);
	if (lst != NULL) {
		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
			goto out;
		nfs4_put_stid(&lst->st_stid);
	}
	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
	if (ns == NULL)
		return NULL;

	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
	if (lst == openlockstateid(ns))
		*new = true;
	else
		nfs4_put_stid(ns);
out:
	return lst;
}
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		(length > ~offset)));
}
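
/*
 * Editorial illustration (hypothetical values): check_lock_length() returns
 * nonzero for *invalid* ranges.  A length of 0 is always invalid; a length
 * of NFS4_MAX_UINT64 means "to EOF" and is always valid; otherwise the
 * length must not exceed ~offset, the bytes remaining before u64 wraparound,
 * so offset = NFS4_MAX_UINT64 - 3 admits a length of at most 3.
 */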
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}

static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	*plst = lst;
	status = nfs_ok;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   union nfsd4_op_u *u)
{
	struct nfsd4_lock *lock = &u->lock;
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct nfsd_file *nf = NULL;
	struct nfsd4_blocked_lock *nbl = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	unsigned char fl_type;
	unsigned int fl_flags = FL_POSIX;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->lk_new_clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		mutex_unlock(&open_stp->st_mutex);
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->lk_new_clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
						     &lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
						  lock->lk_old_lock_seqid,
						  &lock->lk_old_lock_stateid,
						  NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
		case NFS4_READW_LT:
			if (nfsd4_has_session(cstate))
				fl_flags |= FL_SLEEP;
			/* Fallthrough */
		case NFS4_READ_LT:
			spin_lock(&fp->fi_lock);
			nf = find_readable_file_locked(fp);
			if (nf)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			spin_unlock(&fp->fi_lock);
			fl_type = F_RDLCK;
			break;
		case NFS4_WRITEW_LT:
			if (nfsd4_has_session(cstate))
				fl_flags |= FL_SLEEP;
			/* Fallthrough */
		case NFS4_WRITE_LT:
			spin_lock(&fp->fi_lock);
			nf = find_writeable_file_locked(fp);
			if (nf)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			spin_unlock(&fp->fi_lock);
			fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
			goto out;
	}

	if (!nf) {
		status = nfserr_openmode;
		goto out;
	}

	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
	if (!nbl) {
		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	file_lock = &nbl->nbl_lock;
	file_lock->fl_type = fl_type;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = fl_flags;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	if (fl_flags & FL_SLEEP) {
		nbl->nbl_time = ktime_get_boottime_seconds();
		spin_lock(&nn->blocked_locks_lock);
		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
		spin_unlock(&nn->blocked_locks_lock);
	}

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
	switch (err) {
	case 0: /* success! */
		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
		status = 0;
		if (lock->lk_reclaim)
			nn->somebody_reclaimed = true;
		break;
	case FILE_LOCK_DEFERRED:
		nbl = NULL;
		/* Fallthrough */
	case -EAGAIN:		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case -EDEADLK:
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
		status = nfserrno(err);
		break;
	}
out:
	if (nbl) {
		/* dequeue it if we queued it before */
		if (fl_flags & FL_SLEEP) {
			spin_lock(&nn->blocked_locks_lock);
			list_del_init(&nbl->nbl_list);
			list_del_init(&nbl->nbl_lru);
			spin_unlock(&nn->blocked_locks_lock);
		}
		free_blocked_lock(nbl);
	}
	if (nf)
		nfsd_file_put(nf);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		mutex_unlock(&lock_stp->st_mutex);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct nfsd_file *nf;
	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (!err) {
		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
		nfsd_file_put(nf);
	}
	return err;
}
/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_lockt *lockt = &u->lockt;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
					 false);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
			goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
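
/*
 * LOCKU operation: release a byte-range lock by issuing an F_UNLCK request
 * to the VFS for the given range, then bump and return the updated lock
 * stateid on success.
 */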
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					  &locku->lu_stateid, NFS4_LOCK_STID,
					  &stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
					     locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
	nfsd_file_put(nf);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto put_file;
}
/*
 * returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct nfsd_file *nf = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!nf) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(nf->nf_file);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	nfsd_file_put(nf);
	return status;
}
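
/*
 * RELEASE_LOCKOWNER operation: a v4.0 client uses this to tell the server
 * it may free state for a lockowner. The request is refused with
 * nfserr_locks_held while any of that owner's stateids still cover a file
 * on which the owner holds locks.
 */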
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD(reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
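
/*
 * Reclaim records: one nfs4_client_reclaim is kept per client that held
 * state before the last restart, so that reclaim-type OPENs during the
 * grace period can be checked against clients actually known to the
 * previous server instance.
 */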
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
/*
 * failure => all reset bets are off, nfserr_no_grace...
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data);
	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		crp->cr_name.data = name.data;
		crp->cr_name.len = name.len;
		crp->cr_princhash.data = princhash.data;
		crp->cr_princhash.len = princhash.len;
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp->cr_princhash.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}
void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					 struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0) {
			return crp;
		}
	}
	return NULL;
}
/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
			struct nfsd4_compound_state *cstate,
			struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn, false);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
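
/*
 * The fault injection helpers below are test-only hooks (guarded by
 * CONFIG_NFSD_FAULT_INJECTION) that let user space print or forcibly
 * discard clients, locks, openowners and delegations. Helpers that collect
 * state for later teardown pin the affected clients via cl_rpc_users.
 */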
#ifdef CONFIG_NFSD_FAULT_INJECTION

static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_rpc_users);
}

static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
u64
nfsd_inject_print_clients(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	char buf[INET6_ADDRSTRLEN];

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
		pr_info("NFS Client: %s\n", buf);
		++count;
	}
	spin_unlock(&nn->client_lock);

	return count;
}
u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	if (clp)
		expire_client(clp);

	return count;
}
u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}
static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	atomic_inc(&clp->cl_rpc_users);
	list_add(&lst->st_locks, collect);
}
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    bool (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					if (func(lst))
						nfsd_inject_add_lock_to_list(lst,
									collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_rpc_users. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}

static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}
u64
nfsd_inject_print_locks(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_locks(clp);
	spin_unlock(&nn->client_lock);

	return count;
}
static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		put_client(clp);
	}
}
u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}
u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_rpc_users);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_rpc_users. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "openowners");
	return count;
}

static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}
u64
nfsd_inject_print_openowners(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_openowners(clp);
	spin_unlock(&nn->client_lock);

	return count;
}
static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		put_client(clp);
	}
}
u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_rpc_users);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_rpc_users. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}
static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}
u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}
static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}
u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}
u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}
u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}
u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && ++count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
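/*
 * Worked example of the shift below: "4 delegations per megabyte" is
 * pages * 4 * 2^PAGE_SHIFT / 2^20, i.e. pages >> (20 - 2 - PAGE_SHIFT).
 * With 4KiB pages (PAGE_SHIFT == 12) that is pages >> 6, or one
 * delegation allowed per 64 free pages (256KiB) of buffer memory.
 */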
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
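
/*
 * Per-net teardown: destroying each remaining client also unwinds its
 * sessions, stateids and any queued blocked locks, which is why the
 * blocked_locks_lru list is expected to be empty once the confirmed-client
 * loop below has finished.
 */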
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
	       net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}
/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
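
/*
 * Helpers for the "current stateid" feature: put_stateid() saves the
 * stateid produced by one op in the compound so that a later op using the
 * special current-stateid value can have it substituted by get_stateid().
 */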
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}