/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"

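/*
 * Release the delegation's credential and free the structure once the
 * current RCU grace period has expired.
 */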
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred) {
                put_rpccred(delegation->cred);
                delegation->cred = NULL;
        }
        kfree_rcu(delegation, rcu);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

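/*
 * Check whether the inode currently holds a delegation covering all of the
 * requested open modes and is not in the middle of being returned;
 * optionally mark the delegation as referenced.
 */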
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
        struct nfs_delegation *delegation;
        int ret = 0;

        flags &= FMODE_READ|FMODE_WRITE;
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL && (delegation->type & flags) == flags &&
            !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
                if (mark)
                        nfs_mark_delegation_referenced(delegation);
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}

/**
 * nfs4_have_delegation - check if inode has a delegation, and mark it
 * NFS_DELEGATION_REFERENCED if there is one.
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
        return nfs4_do_check_delegation(inode, flags, true);
}

/*
 * nfs4_check_delegation - check if inode has a delegation, do not mark
 * NFS_DELEGATION_REFERENCED if it has one.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t flags)
{
        return nfs4_do_check_delegation(inode, flags, false);
}

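/*
 * Re-establish with the server any locks held under this open context,
 * now that the delegation they were taken under is being returned.
 */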
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        if (inode->i_flock == NULL)
                goto out;

        /* Protect inode->i_flock using the i_lock */
        spin_lock(&inode->i_lock);
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file) != ctx)
                        continue;
                spin_unlock(&inode->i_lock);
                status = nfs4_lock_delegation_recall(fl, state, stateid);
                if (status < 0)
                        goto out;
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
out:
        return status;
}

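/*
 * Convert each delegated open that matches the delegation stateid back into
 * a regular open against the server, then reclaim the locks taken under it.
 */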
static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state_owner *sp;
        struct nfs4_state *state;
        unsigned int seq;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                if (!nfs4_stateid_match(&state->stateid, stateid))
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                sp = state->owner;
                /* Block nfs4_proc_unlck */
                mutex_lock(&sp->so_delegreturn_mutex);
                seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
                err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (!err)
                        err = nfs_delegation_claim_locks(ctx, state, stateid);
                if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
                        err = -EAGAIN;
                mutex_unlock(&sp->so_delegreturn_mutex);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return err;
                goto again;
        }
        spin_unlock(&inode->i_lock);
        return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @res: new delegation state from server
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
                                  struct nfs_openres *res)
{
        struct nfs_delegation *delegation;
        struct rpc_cred *oldcred = NULL;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL) {
                        nfs4_stateid_copy(&delegation->stateid, &res->delegation);
                        delegation->type = res->delegation_type;
                        delegation->maxsize = res->maxsize;
                        oldcred = delegation->cred;
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
                        NFS_I(inode)->delegation_state = delegation->type;
                        spin_unlock(&delegation->lock);
                        put_rpccred(oldcred);
                        rcu_read_unlock();
                        trace_nfs4_reclaim_delegation(inode, res->delegation_type);
                } else {
                        /* We appear to have raced with a delegation return. */
                        spin_unlock(&delegation->lock);
                        rcu_read_unlock();
                        nfs_inode_set_delegation(inode, cred, res);
                }
        } else {
                rcu_read_unlock();
        }
}

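/* Send DELEGRETURN to the server and free the delegation. */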
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        int res = 0;

        res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
        nfs_free_delegation(delegation);
        return res;
}

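/* Grab a reference to the delegation's inode, if it is still attached. */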
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
        struct inode *inode = NULL;

        spin_lock(&delegation->lock);
        if (delegation->inode != NULL)
                inode = igrab(delegation->inode);
        spin_unlock(&delegation->lock);
        return inode;
}

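/*
 * Flag the delegation as being returned. Returns the delegation if the
 * caller now owns the return, or NULL if no delegation is attached or a
 * return is already in progress. Caller must hold rcu_read_lock().
 */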
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
        struct nfs_delegation *ret = NULL;
        struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

        if (delegation == NULL)
                goto out;
        spin_lock(&delegation->lock);
        if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
                ret = delegation;
        spin_unlock(&delegation->lock);
out:
        return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
        struct nfs_delegation *delegation;

        rcu_read_lock();
        delegation = nfs_start_delegation_return_locked(nfsi);
        rcu_read_unlock();
        return delegation;
}

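/*
 * The return attempt failed: clear the RETURNING flag and re-flag the
 * delegation so that the state manager retries the return later.
 */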
static void
nfs_abort_delegation_return(struct nfs_delegation *delegation,
                            struct nfs_client *clp)
{
        spin_lock(&delegation->lock);
        clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
        spin_unlock(&delegation->lock);
        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

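/*
 * Unlink the delegation from the inode and the per-server list.
 * Returns the detached delegation, or NULL if it no longer matches the one
 * attached to the inode. Caller must hold clp->cl_lock.
 */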
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
                             struct nfs_delegation *delegation,
                             struct nfs_client *clp)
{
        struct nfs_delegation *deleg_cur =
                rcu_dereference_protected(nfsi->delegation,
                                          lockdep_is_held(&clp->cl_lock));

        if (deleg_cur == NULL || delegation != deleg_cur)
                return NULL;

        spin_lock(&delegation->lock);
        set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
        nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
                struct nfs_delegation *delegation,
                struct nfs_server *server)
{
        struct nfs_client *clp = server->nfs_client;

        spin_lock(&clp->cl_lock);
        delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
        spin_unlock(&clp->cl_lock);
        return delegation;
}

static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_delegation *delegation;

        delegation = nfs_start_delegation_return(nfsi);
        if (delegation == NULL)
                return NULL;
        return nfs_detach_delegation(nfsi, delegation, server);
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @res: new delegation state from server
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation, *old_delegation;
        struct nfs_delegation *freeme = NULL;
        int status = 0;

        delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
        if (delegation == NULL)
                return -ENOMEM;
        nfs4_stateid_copy(&delegation->stateid, &res->delegation);
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->change_attr = inode->i_version;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;
        delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
        spin_lock_init(&delegation->lock);

        spin_lock(&clp->cl_lock);
        old_delegation = rcu_dereference_protected(nfsi->delegation,
                                                   lockdep_is_held(&clp->cl_lock));
        if (old_delegation != NULL) {
                if (nfs4_stateid_match(&delegation->stateid,
                                       &old_delegation->stateid) &&
                    delegation->type == old_delegation->type) {
                        goto out;
                }
                /*
                 * Deal with broken servers that hand out two
                 * delegations for the same file.
                 * Allow for upgrades to a WRITE delegation, but
                 * nothing else.
                 */
                dfprintk(FILE, "%s: server %s handed out "
                         "a duplicate delegation!\n",
                         __func__, clp->cl_hostname);
                if (delegation->type == old_delegation->type ||
                    !(delegation->type & FMODE_WRITE)) {
                        freeme = delegation;
                        delegation = NULL;
                        goto out;
                }
                freeme = nfs_detach_delegation_locked(nfsi,
                                                      old_delegation, clp);
                if (freeme == NULL)
                        goto out;
        }
        list_add_rcu(&delegation->super_list, &server->delegations);
        nfsi->delegation_state = delegation->type;
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;

        /* Ensure we revalidate the attributes and page cache! */
        spin_lock(&inode->i_lock);
        nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
        spin_unlock(&inode->i_lock);
        trace_nfs4_set_delegation(inode, res->delegation_type);

out:
        spin_unlock(&clp->cl_lock);
        if (delegation != NULL)
                nfs_free_delegation(delegation);
        if (freeme != NULL)
                nfs_do_return_delegation(inode, freeme, 0);
        return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        int err;

        if (delegation == NULL)
                return 0;
        do {
                err = nfs_delegation_claim_opens(inode, &delegation->stateid);
                if (!issync || err != -EAGAIN)
                        break;
                /*
                 * Guard against state recovery
                 */
                err = nfs4_wait_clnt_recover(clp);
        } while (err == 0);

        if (err) {
                nfs_abort_delegation_return(delegation, clp);
                goto out;
        }
        if (!nfs_detach_delegation(nfsi, delegation, NFS_SERVER(inode)))
                goto out;

        err = nfs_do_return_delegation(inode, delegation, issync);
out:
        return err;
}

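/*
 * Decide whether this delegation should be returned now: either it was
 * explicitly marked for return, or it was marked return-if-closed and the
 * inode no longer has any open files.
 */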
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
        bool ret = false;

        if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
                ret = true;
        if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
                struct inode *inode;

                spin_lock(&delegation->lock);
                inode = delegation->inode;
                if (inode && list_empty(&NFS_I(inode)->open_files))
                        ret = true;
                spin_unlock(&delegation->lock);
        }
        return ret;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct nfs_server *server;
        struct inode *inode;
        int err = 0;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                        super_list) {
                        if (!nfs_delegation_need_return(delegation))
                                continue;
                        inode = nfs_delegation_grab_inode(delegation);
                        if (inode == NULL)
                                continue;
                        delegation = nfs_start_delegation_return_locked(NFS_I(inode));
                        rcu_read_unlock();

                        err = nfs_end_delegation_return(inode, delegation, 0);
                        iput(inode);
                        if (!err)
                                goto restart;
                        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
                        return err;
                }
        }
        rcu_read_unlock();
        return 0;
}

/**
 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode().
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
        struct nfs_delegation *delegation;

        delegation = nfs_inode_detach_delegation(inode);
        if (delegation != NULL)
                nfs_do_return_delegation(inode, delegation, 0);
}

/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int err = 0;

        nfs_wb_all(inode);
        delegation = nfs_start_delegation_return(nfsi);
        if (delegation != NULL)
                err = nfs_end_delegation_return(inode, delegation, 1);
        return err;
}

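/*
 * Mark a delegation for return once the inode has no more open files, and
 * note that the client has delegation returns pending.
 */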
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
                struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
        set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static void nfs_mark_return_delegation(struct nfs_server *server,
                struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
        set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

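/*
 * Mark every delegation on this server for return. Returns true if at
 * least one delegation was marked.
 */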
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
        struct nfs_delegation *delegation;
        bool ret = false;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                nfs_mark_return_delegation(server, delegation);
                ret = true;
        }
        return ret;
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_server_mark_return_all_delegations(server);
        rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
        if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
                nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegations - mark all of a client's delegations for return
 * @clp: client to process
 *
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
        nfs_client_mark_return_all_delegations(clp);
        nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_server_return_all_delegations - return delegations for one superblock
 * @server: nfs_server to process
 *
 */
void nfs_server_return_all_delegations(struct nfs_server *server)
{
        struct nfs_client *clp = server->nfs_client;
        bool need_wait;

        if (clp == NULL)
                return;

        rcu_read_lock();
        need_wait = nfs_server_mark_return_all_delegations(server);
        rcu_read_unlock();

        if (need_wait) {
                nfs4_schedule_state_manager(clp);
                nfs4_wait_clnt_recover(clp);
        }
}

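/*
 * Mark delegations matching the given types for return once closed; a
 * read/write delegation is only affected when FMODE_WRITE is being expired.
 */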
static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
                                                    fmode_t flags)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
                        continue;
                if (delegation->type & flags)
                        nfs_mark_return_if_closed_delegation(server, delegation);
        }
}

static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
                                                           fmode_t flags)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_mark_return_unused_delegation_types(server, flags);
        rcu_read_unlock();
}

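/**
 * nfs_remove_bad_delegation - detach and discard an inode's delegation
 * @inode: inode holding the delegation
 *
 * Detaches the delegation, initiates recovery of any state that was relying
 * on its stateid, and frees it.
 */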
void nfs_remove_bad_delegation(struct inode *inode)
{
        struct nfs_delegation *delegation;

        delegation = nfs_inode_detach_delegation(inode);
        if (delegation) {
                nfs_inode_find_state_and_recover(inode, &delegation->stateid);
                nfs_free_delegation(delegation);
        }
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);

/**
 * nfs_expire_unused_delegation_types - mark delegations of the given types for return
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
        nfs_client_mark_return_unused_delegation_types(clp, flags);
        nfs_delegation_run_state_manager(clp);
}

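/*
 * Clear the REFERENCED flag on delegations that have been used since the
 * last scan, and mark the remainder for return once their files are closed.
 */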
static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
                        continue;
                nfs_mark_return_if_closed_delegation(server, delegation);
        }
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_mark_return_unreferenced_delegations(server);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
                                      const nfs4_stateid *stateid)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_delegation *delegation;

        filemap_flush(inode->i_mapping);

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation == NULL)
                goto out_enoent;

        if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
                goto out_enoent;
        nfs_mark_return_delegation(server, delegation);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
        return 0;
out_enoent:
        rcu_read_unlock();
        return -ENOENT;
}

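/*
 * Search one server's delegation list for an inode matching the given
 * filehandle; return it with an extra reference held, or NULL.
 */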
static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                }
                spin_unlock(&delegation->lock);
                if (res != NULL)
                        break;
        }
        return res;
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
                                        const struct nfs_fh *fhandle)
{
        struct nfs_server *server;
        struct inode *res = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
                if (res != NULL)
                        break;
        }
        rcu_read_unlock();
        return res;
}

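/* Mark every delegation on this server as needing to be reclaimed. */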
static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list)
                set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_delegation_mark_reclaim_server(server);
        rcu_read_unlock();
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct nfs_server *server;
        struct inode *inode;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                        super_list) {
                        if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
                                     &delegation->flags) == 0)
                                continue;
                        inode = nfs_delegation_grab_inode(delegation);
                        if (inode == NULL)
                                continue;
                        delegation = nfs_detach_delegation(NFS_I(inode),
                                                           delegation, server);
                        rcu_read_unlock();

                        if (delegation != NULL)
                                nfs_free_delegation(delegation);
                        iput(inode);
                        goto restart;
                }
        }
        rcu_read_unlock();
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
        struct nfs_server *server;
        int ret = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                if (!list_empty(&server->delegations)) {
                        ret = 1;
                        break;
                }
        rcu_read_unlock();
        return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @dst: stateid data structure to fill in
 * @inode: inode to check
 * @flags: delegation type requirement
 *
 * Returns "true" and fills in "dst->data" if inode had a delegation,
 * otherwise "false" is returned.
 */
bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
                                  fmode_t flags)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        bool ret;

        flags &= FMODE_READ|FMODE_WRITE;
        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        ret = (delegation != NULL && (delegation->type & flags) == flags);
        if (ret) {
                nfs4_stateid_copy(dst, &delegation->stateid);
                nfs_mark_delegation_referenced(delegation);
        }
        rcu_read_unlock();
        return ret;
}