/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
23 static void nfs_free_delegation(struct nfs_delegation
*delegation
)
26 put_rpccred(delegation
->cred
);
30 static int nfs_delegation_claim_locks(struct nfs_open_context
*ctx
, struct nfs4_state
*state
)
32 struct inode
*inode
= state
->inode
;
36 for (fl
= inode
->i_flock
; fl
!= 0; fl
= fl
->fl_next
) {
37 if (!(fl
->fl_flags
& (FL_POSIX
|FL_FLOCK
)))
39 if ((struct nfs_open_context
*)fl
->fl_file
->private_data
!= ctx
)
41 status
= nfs4_lock_delegation_recall(state
, fl
);
46 printk(KERN_ERR
"%s: unhandled error %d.\n",
47 __FUNCTION__
, status
);
48 case -NFS4ERR_EXPIRED
:
49 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
50 case -NFS4ERR_STALE_CLIENTID
:
51 nfs4_schedule_state_recovery(NFS_SERVER(inode
)->nfs_client
);
60 static void nfs_delegation_claim_opens(struct inode
*inode
, const nfs4_stateid
*stateid
)
62 struct nfs_inode
*nfsi
= NFS_I(inode
);
63 struct nfs_open_context
*ctx
;
64 struct nfs4_state
*state
;
68 spin_lock(&inode
->i_lock
);
69 list_for_each_entry(ctx
, &nfsi
->open_files
, list
) {
73 if (!test_bit(NFS_DELEGATED_STATE
, &state
->flags
))
75 if (memcmp(state
->stateid
.data
, stateid
->data
, sizeof(state
->stateid
.data
)) != 0)
77 get_nfs_open_context(ctx
);
78 spin_unlock(&inode
->i_lock
);
79 err
= nfs4_open_delegation_recall(ctx
, state
, stateid
);
81 err
= nfs_delegation_claim_locks(ctx
, state
);
82 put_nfs_open_context(ctx
);
87 spin_unlock(&inode
->i_lock
);
91 * Set up a delegation on an inode
93 void nfs_inode_reclaim_delegation(struct inode
*inode
, struct rpc_cred
*cred
, struct nfs_openres
*res
)
95 struct nfs_delegation
*delegation
= NFS_I(inode
)->delegation
;
97 if (delegation
== NULL
)
99 memcpy(delegation
->stateid
.data
, res
->delegation
.data
,
100 sizeof(delegation
->stateid
.data
));
101 delegation
->type
= res
->delegation_type
;
102 delegation
->maxsize
= res
->maxsize
;
104 delegation
->cred
= get_rpccred(cred
);
105 delegation
->flags
&= ~NFS_DELEGATION_NEED_RECLAIM
;
106 NFS_I(inode
)->delegation_state
= delegation
->type
;
111 * Set up a delegation on an inode
113 int nfs_inode_set_delegation(struct inode
*inode
, struct rpc_cred
*cred
, struct nfs_openres
*res
)
115 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
116 struct nfs_inode
*nfsi
= NFS_I(inode
);
117 struct nfs_delegation
*delegation
;
120 /* Ensure we first revalidate the attributes and page cache! */
121 if ((nfsi
->cache_validity
& (NFS_INO_REVAL_PAGECACHE
|NFS_INO_INVALID_ATTR
)))
122 __nfs_revalidate_inode(NFS_SERVER(inode
), inode
);
124 delegation
= kmalloc(sizeof(*delegation
), GFP_KERNEL
);
125 if (delegation
== NULL
)
127 memcpy(delegation
->stateid
.data
, res
->delegation
.data
,
128 sizeof(delegation
->stateid
.data
));
129 delegation
->type
= res
->delegation_type
;
130 delegation
->maxsize
= res
->maxsize
;
131 delegation
->change_attr
= nfsi
->change_attr
;
132 delegation
->cred
= get_rpccred(cred
);
133 delegation
->inode
= inode
;
135 spin_lock(&clp
->cl_lock
);
136 if (nfsi
->delegation
== NULL
) {
137 list_add(&delegation
->super_list
, &clp
->cl_delegations
);
138 nfsi
->delegation
= delegation
;
139 nfsi
->delegation_state
= delegation
->type
;
142 if (memcmp(&delegation
->stateid
, &nfsi
->delegation
->stateid
,
143 sizeof(delegation
->stateid
)) != 0 ||
144 delegation
->type
!= nfsi
->delegation
->type
) {
145 printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
146 __FUNCTION__
, NIPQUAD(clp
->cl_addr
.sin_addr
));
150 spin_unlock(&clp
->cl_lock
);
155 static int nfs_do_return_delegation(struct inode
*inode
, struct nfs_delegation
*delegation
)
159 res
= nfs4_proc_delegreturn(inode
, delegation
->cred
, &delegation
->stateid
);
160 nfs_free_delegation(delegation
);
164 /* Sync all data to disk upon delegation return */
165 static void nfs_msync_inode(struct inode
*inode
)
167 filemap_fdatawrite(inode
->i_mapping
);
169 filemap_fdatawait(inode
->i_mapping
);
173 * Basic procedure for returning a delegation to the server
175 static int __nfs_inode_return_delegation(struct inode
*inode
, struct nfs_delegation
*delegation
)
177 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
178 struct nfs_inode
*nfsi
= NFS_I(inode
);
180 nfs_msync_inode(inode
);
181 down_read(&clp
->cl_sem
);
182 /* Guard against new delegated open calls */
183 down_write(&nfsi
->rwsem
);
184 nfs_delegation_claim_opens(inode
, &delegation
->stateid
);
185 up_write(&nfsi
->rwsem
);
186 up_read(&clp
->cl_sem
);
187 nfs_msync_inode(inode
);
189 return nfs_do_return_delegation(inode
, delegation
);
192 static struct nfs_delegation
*nfs_detach_delegation_locked(struct nfs_inode
*nfsi
, const nfs4_stateid
*stateid
)
194 struct nfs_delegation
*delegation
= nfsi
->delegation
;
196 if (delegation
== NULL
)
198 if (stateid
!= NULL
&& memcmp(delegation
->stateid
.data
, stateid
->data
,
199 sizeof(delegation
->stateid
.data
)) != 0)
201 list_del_init(&delegation
->super_list
);
202 nfsi
->delegation
= NULL
;
203 nfsi
->delegation_state
= 0;
209 int nfs_inode_return_delegation(struct inode
*inode
)
211 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
212 struct nfs_inode
*nfsi
= NFS_I(inode
);
213 struct nfs_delegation
*delegation
;
216 if (nfsi
->delegation_state
!= 0) {
217 spin_lock(&clp
->cl_lock
);
218 delegation
= nfs_detach_delegation_locked(nfsi
, NULL
);
219 spin_unlock(&clp
->cl_lock
);
220 if (delegation
!= NULL
)
221 err
= __nfs_inode_return_delegation(inode
, delegation
);
227 * Return all delegations associated to a super block
229 void nfs_return_all_delegations(struct super_block
*sb
)
231 struct nfs_client
*clp
= NFS_SB(sb
)->nfs_client
;
232 struct nfs_delegation
*delegation
;
238 spin_lock(&clp
->cl_lock
);
239 list_for_each_entry(delegation
, &clp
->cl_delegations
, super_list
) {
240 if (delegation
->inode
->i_sb
!= sb
)
242 inode
= igrab(delegation
->inode
);
245 nfs_detach_delegation_locked(NFS_I(inode
), NULL
);
246 spin_unlock(&clp
->cl_lock
);
247 __nfs_inode_return_delegation(inode
, delegation
);
251 spin_unlock(&clp
->cl_lock
);
254 static int nfs_do_expire_all_delegations(void *ptr
)
256 struct nfs_client
*clp
= ptr
;
257 struct nfs_delegation
*delegation
;
260 allow_signal(SIGKILL
);
262 spin_lock(&clp
->cl_lock
);
263 if (test_bit(NFS4CLNT_STATE_RECOVER
, &clp
->cl_state
) != 0)
265 if (test_bit(NFS4CLNT_LEASE_EXPIRED
, &clp
->cl_state
) == 0)
267 list_for_each_entry(delegation
, &clp
->cl_delegations
, super_list
) {
268 inode
= igrab(delegation
->inode
);
271 nfs_detach_delegation_locked(NFS_I(inode
), NULL
);
272 spin_unlock(&clp
->cl_lock
);
273 __nfs_inode_return_delegation(inode
, delegation
);
278 spin_unlock(&clp
->cl_lock
);
280 module_put_and_exit(0);
283 void nfs_expire_all_delegations(struct nfs_client
*clp
)
285 struct task_struct
*task
;
287 __module_get(THIS_MODULE
);
288 atomic_inc(&clp
->cl_count
);
289 task
= kthread_run(nfs_do_expire_all_delegations
, clp
,
290 "%u.%u.%u.%u-delegreturn",
291 NIPQUAD(clp
->cl_addr
.sin_addr
));
295 module_put(THIS_MODULE
);
299 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
301 void nfs_handle_cb_pathdown(struct nfs_client
*clp
)
303 struct nfs_delegation
*delegation
;
309 spin_lock(&clp
->cl_lock
);
310 list_for_each_entry(delegation
, &clp
->cl_delegations
, super_list
) {
311 inode
= igrab(delegation
->inode
);
314 nfs_detach_delegation_locked(NFS_I(inode
), NULL
);
315 spin_unlock(&clp
->cl_lock
);
316 __nfs_inode_return_delegation(inode
, delegation
);
320 spin_unlock(&clp
->cl_lock
);
323 struct recall_threadargs
{
325 struct nfs_client
*clp
;
326 const nfs4_stateid
*stateid
;
328 struct completion started
;
332 static int recall_thread(void *data
)
334 struct recall_threadargs
*args
= (struct recall_threadargs
*)data
;
335 struct inode
*inode
= igrab(args
->inode
);
336 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
337 struct nfs_inode
*nfsi
= NFS_I(inode
);
338 struct nfs_delegation
*delegation
;
340 daemonize("nfsv4-delegreturn");
342 nfs_msync_inode(inode
);
343 down_read(&clp
->cl_sem
);
344 down_write(&nfsi
->rwsem
);
345 spin_lock(&clp
->cl_lock
);
346 delegation
= nfs_detach_delegation_locked(nfsi
, args
->stateid
);
347 if (delegation
!= NULL
)
350 args
->result
= -ENOENT
;
351 spin_unlock(&clp
->cl_lock
);
352 complete(&args
->started
);
353 nfs_delegation_claim_opens(inode
, args
->stateid
);
354 up_write(&nfsi
->rwsem
);
355 up_read(&clp
->cl_sem
);
356 nfs_msync_inode(inode
);
358 if (delegation
!= NULL
)
359 nfs_do_return_delegation(inode
, delegation
);
361 module_put_and_exit(0);
365 * Asynchronous delegation recall!
367 int nfs_async_inode_return_delegation(struct inode
*inode
, const nfs4_stateid
*stateid
)
369 struct recall_threadargs data
= {
375 init_completion(&data
.started
);
376 __module_get(THIS_MODULE
);
377 status
= kernel_thread(recall_thread
, &data
, CLONE_KERNEL
);
380 wait_for_completion(&data
.started
);
383 module_put(THIS_MODULE
);
388 * Retrieve the inode associated with a delegation
390 struct inode
*nfs_delegation_find_inode(struct nfs_client
*clp
, const struct nfs_fh
*fhandle
)
392 struct nfs_delegation
*delegation
;
393 struct inode
*res
= NULL
;
394 spin_lock(&clp
->cl_lock
);
395 list_for_each_entry(delegation
, &clp
->cl_delegations
, super_list
) {
396 if (nfs_compare_fh(fhandle
, &NFS_I(delegation
->inode
)->fh
) == 0) {
397 res
= igrab(delegation
->inode
);
401 spin_unlock(&clp
->cl_lock
);
406 * Mark all delegations as needing to be reclaimed
408 void nfs_delegation_mark_reclaim(struct nfs_client
*clp
)
410 struct nfs_delegation
*delegation
;
411 spin_lock(&clp
->cl_lock
);
412 list_for_each_entry(delegation
, &clp
->cl_delegations
, super_list
)
413 delegation
->flags
|= NFS_DELEGATION_NEED_RECLAIM
;
414 spin_unlock(&clp
->cl_lock
);
418 * Reap all unclaimed delegations after reboot recovery is done
420 void nfs_delegation_reap_unclaimed(struct nfs_client
*clp
)
422 struct nfs_delegation
*delegation
, *n
;
424 spin_lock(&clp
->cl_lock
);
425 list_for_each_entry_safe(delegation
, n
, &clp
->cl_delegations
, super_list
) {
426 if ((delegation
->flags
& NFS_DELEGATION_NEED_RECLAIM
) == 0)
428 list_move(&delegation
->super_list
, &head
);
429 NFS_I(delegation
->inode
)->delegation
= NULL
;
430 NFS_I(delegation
->inode
)->delegation_state
= 0;
432 spin_unlock(&clp
->cl_lock
);
433 while(!list_empty(&head
)) {
434 delegation
= list_entry(head
.next
, struct nfs_delegation
, super_list
);
435 list_del(&delegation
->super_list
);
436 nfs_free_delegation(delegation
);
440 int nfs4_copy_delegation_stateid(nfs4_stateid
*dst
, struct inode
*inode
)
442 struct nfs_client
*clp
= NFS_SERVER(inode
)->nfs_client
;
443 struct nfs_inode
*nfsi
= NFS_I(inode
);
444 struct nfs_delegation
*delegation
;
447 if (nfsi
->delegation_state
== 0)
449 spin_lock(&clp
->cl_lock
);
450 delegation
= nfsi
->delegation
;
451 if (delegation
!= NULL
) {
452 memcpy(dst
->data
, delegation
->stateid
.data
, sizeof(dst
->data
));
455 spin_unlock(&clp
->cl_lock
);