/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}

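/*
 * Release the credential and free the delegation once an RCU grace
 * period has elapsed.
 */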
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}

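/*
 * Reclaim the POSIX and flock locks held under this open context now
 * that the delegation is being returned.
 */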
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d.\n",
					__FUNCTION__, status);
		case -NFS4ERR_EXPIRED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

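/*
 * Walk the open contexts on the inode and convert each delegated open
 * matching the given stateid back into a regular open against the server.
 */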
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation on an inode that is being reclaimed
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) == NULL) {
		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation_state = delegation->type;
		rcu_assign_pointer(nfsi->delegation, delegation);
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out "
					"a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

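/*
 * Send DELEGRETURN to the server and free the local delegation record.
 */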
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation);
}

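/*
 * Unlink the delegation from the inode and the per-client list.
 * The caller must hold clp->cl_lock.
 */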
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}

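/*
 * Return any delegation currently attached to the inode.
 */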
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

/*
 * Return all delegations associated with a given superblock
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

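/*
 * Kernel thread: return every delegation held by the client once its
 * lease has expired.
 */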
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}

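/*
 * Spawn a kernel thread to return all of the client's delegations after
 * the lease has expired.
 */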
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
				"%u.%u.%u.%u-delegreturn",
				NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

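/*
 * Kernel thread servicing a single asynchronous delegation recall
 * (CB_RECALL) from the server.
 */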
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		goto restart;
	}
	rcu_read_unlock();
}

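/*
 * Copy the delegation stateid for an inode into *dst.
 * Returns 1 if a delegation was found, 0 otherwise.
 */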
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}