/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE 8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

void
init_nfsv4_state(struct nfs_server *server)
{
        server->nfs4_state = NULL;
        INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
        if (server->mnt_path) {
                kfree(server->mnt_path);
                server->mnt_path = NULL;
        }
        if (server->nfs4_state) {
                nfs4_put_client(server->nfs4_state);
                server->nfs4_state = NULL;
        }
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        if (nfs_callback_up() < 0)
                return NULL;
        if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
                nfs_callback_down();
                return NULL;
        }
        memset(clp, 0, sizeof(*clp));
        memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
        init_rwsem(&clp->cl_sem);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_state_owners);
        INIT_LIST_HEAD(&clp->cl_unused);
        spin_lock_init(&clp->cl_lock);
        atomic_set(&clp->cl_count, 1);
        INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
        INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
        INIT_LIST_HEAD(&clp->cl_superblocks);
        init_waitqueue_head(&clp->cl_waitq);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
        clp->cl_rpcclient = ERR_PTR(-EINVAL);
        clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_OK;
        return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
        struct nfs4_state_owner *sp;

        while (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next,
                                struct nfs4_state_owner,
                                so_list);
                list_del(&sp->so_list);
                kfree(sp);
        }
        BUG_ON(!list_empty(&clp->cl_state_owners));
        if (clp->cl_cred)
                put_rpccred(clp->cl_cred);
        nfs_idmap_delete(clp);
        if (!IS_ERR(clp->cl_rpcclient))
                rpc_shutdown_client(clp->cl_rpcclient);
        kfree(clp);
        nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
                if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
                        atomic_inc(&clp->cl_count);
                        return clp;
                }
        }
        return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        spin_lock(&state_spinlock);
        clp = __nfs4_find_client(addr);
        spin_unlock(&state_spinlock);
        return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
        struct nfs4_client *clp, *new = NULL;

        spin_lock(&state_spinlock);
        for (;;) {
                clp = __nfs4_find_client(addr);
                if (clp != NULL)
                        break;
                clp = new;
                if (clp != NULL) {
                        list_add(&clp->cl_servers, &nfs4_clientid_list);
                        new = NULL;
                        break;
                }
                spin_unlock(&state_spinlock);
                new = nfs4_alloc_client(addr);
                spin_lock(&state_spinlock);
                if (new == NULL)
                        break;
        }
        spin_unlock(&state_spinlock);
        if (new)
                nfs4_free_client(new);
        return clp;
}
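
/*
 * nfs4_alloc_client() sleeps (kmalloc(GFP_KERNEL), bringing up the
 * callback service), so the loop above drops state_spinlock around the
 * allocation and then re-checks for a client that raced in while the
 * lock was released; a losing allocation is freed once the lock has
 * been dropped for good.
 */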

void
nfs4_put_client(struct nfs4_client *clp)
{
        if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
                return;
        list_del(&clp->cl_servers);
        spin_unlock(&state_spinlock);
        BUG_ON(!list_empty(&clp->cl_superblocks));
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        nfs4_kill_renewd(clp);
        nfs4_free_client(clp);
}
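
/*
 * atomic_dec_and_lock() only takes state_spinlock when cl_count is
 * about to hit zero, so the final put unhashes the client atomically
 * with respect to __nfs4_find_client(), which takes its reference
 * under the same lock.
 */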

static int __nfs4_init_client(struct nfs4_client *clp)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}
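
/*
 * This is the two-step SETCLIENTID / SETCLIENTID_CONFIRM handshake that
 * NFSv4 (RFC 3530) requires before a client may use stateful
 * operations; once it succeeds, the lease renewal timer is started so
 * that the server does not expire our state.
 */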

int nfs4_init_client(struct nfs4_client *clp)
{
        return nfs4_map_errors(__nfs4_init_client(clp));
}

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
        return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp = NULL;

        if (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
                atomic_inc(&sp->so_count);
                sp->so_cred = cred;
                list_move(&sp->so_list, &clp->cl_state_owners);
                clp->cl_nunused--;
        }
        return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp, *res = NULL;

        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                if (sp->so_cred != cred)
                        continue;
                atomic_inc(&sp->so_count);
                /* Move to the head of the list */
                list_move(&sp->so_list, &clp->cl_state_owners);
                res = sp;
                break;
        }
        return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kmalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        init_MUTEX(&sp->so_sema);
        sp->so_seqid = 0; /* arbitrary */
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        atomic_set(&sp->so_count, 1);
        return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;

        spin_lock(&clp->cl_lock);
        list_del_init(&sp->so_list);
        spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs4_client *clp = server->nfs4_state;
        struct nfs4_state_owner *sp, *new;

        get_rpccred(cred);
        new = nfs4_alloc_state_owner();
        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(clp, cred);
        if (sp == NULL)
                sp = nfs4_client_grab_unused(clp, cred);
        if (sp == NULL && new != NULL) {
                list_add(&new->so_list, &clp->cl_state_owners);
                new->so_client = clp;
                new->so_id = nfs4_alloc_lockowner_id(clp);
                new->so_cred = cred;
                sp = new;
                new = NULL;
        }
        spin_unlock(&clp->cl_lock);
        if (new)
                kfree(new);
        if (sp != NULL)
                return sp;
        put_rpccred(cred);
        return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
                goto out_free;
        if (list_empty(&sp->so_list))
                goto out_free;
        list_move(&sp->so_list, &clp->cl_unused);
        clp->cl_nunused++;
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        cred = NULL;
        return;
out_free:
        list_del(&sp->so_list);
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        kfree(sp);
}
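
/*
 * Instead of freeing every state owner on its last put, up to
 * OPENOWNER_POOL_SIZE of them are parked on clp->cl_unused so that a
 * later OPEN under the same client can recycle one via
 * nfs4_client_grab_unused() rather than allocating afresh.
 */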

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        state->state = 0;
        state->nreaders = 0;
        state->nwriters = 0;
        state->flags = 0;
        memset(state->stateid.data, 0, sizeof(state->stateid.data));
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        init_MUTEX(&state->lock_sema);
        rwlock_init(&state->state_lock);
        return state;
}

static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        mode &= (FMODE_READ|FMODE_WRITE);
        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner->so_cred != cred)
                        continue;
                if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
                        continue;
                if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
                        continue;
                if ((state->state & mode) != mode)
                        continue;
                atomic_inc(&state->count);
                if (mode & FMODE_READ)
                        state->nreaders++;
                if (mode & FMODE_WRITE)
                        state->nwriters++;
                return state;
        }
        return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                /* Is this in the process of being freed? */
                if (state->nreaders == 0 && state->nwriters == 0)
                        continue;
                if (state->owner == owner) {
                        atomic_inc(&state->count);
                        return state;
                }
        }
        return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs4_state *state;

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state(inode, cred, mode);
        spin_unlock(&inode->i_lock);
        return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                /* Caller *must* be holding owner->so_sema */
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}
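
/*
 * Note the find/alloc/re-check pattern in nfs4_get_open_state(): the
 * lookup is repeated after i_lock has been dropped for the allocation,
 * since another opener may have attached a state for the same owner in
 * that window, in which case the speculative allocation is discarded.
 */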

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem and owner->so_sema!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
                return;
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
        spin_unlock(&inode->i_lock);
        list_del(&state->open_states);
        iput(inode);
        BUG_ON(state->state != 0);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Beware! Caller must be holding no references to clp->cl_sem
 * or owner->so_sema!
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
        struct nfs4_client *clp = owner->so_client;
        int newstate;

        atomic_inc(&owner->so_count);
        down_read(&clp->cl_sem);
        down(&owner->so_sema);
        /* Protect against nfs4_find_state() */
        spin_lock(&inode->i_lock);
        if (mode & FMODE_READ)
                state->nreaders--;
        if (mode & FMODE_WRITE)
                state->nwriters--;
        if (state->nwriters == 0) {
                if (state->nreaders == 0)
                        list_del_init(&state->inode_states);
                /* See reclaim code */
                list_move_tail(&state->open_states, &owner->so_states);
        }
        spin_unlock(&inode->i_lock);
        newstate = 0;
        if (state->state != 0) {
                if (state->nreaders)
                        newstate |= FMODE_READ;
                if (state->nwriters)
                        newstate |= FMODE_WRITE;
                if (state->state == newstate)
                        goto out;
                if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
                        return;
        }
out:
        nfs4_put_open_state(state);
        up(&owner->so_sema);
        nfs4_put_state_owner(owner);
        up_read(&clp->cl_sem);
}
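
/*
 * After the nreaders/nwriters accounting above, newstate holds the
 * share modes that are still in use.  nfs4_do_close() is presumably
 * expected to downgrade the open on the server when newstate != 0 and
 * to close it outright when newstate == 0; an -EINPROGRESS return
 * means the RPC completes asynchronously, and the early return assumes
 * that async path performs the final puts and unlocks on our behalf.
 */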

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;

        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

struct nfs4_lock_state *
nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;

        read_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        read_unlock(&state->state_lock);
        return lsp;
}

/*
 * Allocate a new lock_state structure for the given lock owner, and
 * assign it a fresh lockowner id.
 *
 * The caller must be holding state->lock_sema
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs4_client *clp = state->owner->so_client;

        lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        lsp->ls_flags = 0;
        lsp->ls_seqid = 0; /* arbitrary */
        lsp->ls_id = -1;
        memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        INIT_LIST_HEAD(&lsp->ls_locks);
        spin_lock(&clp->cl_lock);
        lsp->ls_id = nfs4_alloc_lockowner_id(clp);
        spin_unlock(&clp->cl_lock);
        return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp;

        lsp = nfs4_find_lock_state(state, owner);
        if (lsp == NULL)
                lsp = nfs4_alloc_lock_state(state, owner);
        return lsp;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void
nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        if (test_bit(LK_STATE_IN_USE, &state->flags)) {
                struct nfs4_lock_state *lsp;

                lsp = nfs4_find_lock_state(state, fl_owner);
                if (lsp) {
                        memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
                        nfs4_put_lock_state(lsp);
                        return;
                }
        }
        memcpy(dst, &state->stateid, sizeof(*dst));
}
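
/*
 * Stateid precedence for READ/WRITE requests: if this lock owner holds
 * byte-range locks on the file we send the lock stateid, otherwise the
 * open stateid, so that the server can validate the I/O against the
 * most specific state it knows about.
 */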

/*
 * Called with state->lock_sema and clp->cl_sem held.
 */
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
        if (status == NFS_OK || seqid_mutating_err(-status))
                lsp->ls_seqid++;
}

/*
 * Check to see if the request lock (type F_UNLCK) affects the fl lock.
 *
 * fl and request must have the same posix owner
 *
 * return:
 * 0 -> fl not affected by request
 * 1 -> fl consumed by request
 */
static int
nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
{
        if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
                return 1;
        return 0;
}
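
/*
 * Example: a lock covering bytes [0, 99] is consumed by an unlock
 * request for [0, 199] (return 1), but an unlock of [50, 149] only
 * truncates it, so it is not consumed (return 0) and the lock state
 * must stay alive.
 */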

/*
 * Post an initialized lock_state on the state->lock_states list.
 */
void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
{
        if (!list_empty(&lsp->ls_locks))
                return;
        atomic_inc(&lsp->ls_count);
        write_lock(&state->state_lock);
        list_add(&lsp->ls_locks, &state->lock_states);
        set_bit(LK_STATE_IN_USE, &state->flags);
        write_unlock(&state->state_lock);
}

/*
 * To decide whether to 'reap' the lock state:
 * 1) search i_flock for file_locks whose fl_owner matches the
 *    lock state's ls_owner.
 * 2) determine whether the unlock will consume every such lock.
 *    If so, reap the lock state; otherwise, leave it alone.
 */
void
nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (fl->fl_owner != lsp->ls_owner)
                        continue;
                /* Exit if we find at least one lock which is not consumed */
                if (nfs4_check_unlock(fl, request) == 0)
                        return;
        }

        write_lock(&state->state_lock);
        list_del_init(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        write_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}
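
/*
 * That is: the lock_state is unhashed only when the unlock consumes
 * every remaining POSIX lock held by this owner on the file; once the
 * list empties, clearing LK_STATE_IN_USE sends nfs4_copy_stateid()
 * back to using the open stateid.
 */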

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void
nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        if (!atomic_dec_and_test(&lsp->ls_count))
                return;
        BUG_ON(!list_empty(&lsp->ls_locks));
        kfree(lsp);
}

/*
 * Called with sp->so_sema and clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
{
        if (status == NFS_OK || seqid_mutating_err(-status))
                sp->so_seqid++;
        /* If the server returns BAD_SEQID, unhash state_owner here */
        if (status == -NFS4ERR_BAD_SEQID)
                nfs4_drop_state_owner(sp);
}
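
/*
 * The open owner's seqid has to track the server's copy exactly: the
 * server bumps its copy for every seqid-mutating operation it
 * processes, even a failed one, so we must do the same or all
 * subsequent OPEN/CLOSE calls would draw NFS4ERR_BAD_SEQID.  When the
 * server does report BAD_SEQID the two sides have already diverged,
 * and the only safe response is to stop reusing this state owner,
 * hence nfs4_drop_state_owner().
 */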

static int reclaimer(void *);
struct reclaimer_args {
        struct nfs4_client *clp;
        struct completion complete;
};

/*
 * State recovery routine
 */
void
nfs4_recover_state(void *data)
{
        struct nfs4_client *clp = (struct nfs4_client *)data;
        struct reclaimer_args args = {
                .clp = clp,
        };
        might_sleep();

        init_completion(&args.complete);

        if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
                goto out_failed_clear;
        wait_for_completion(&args.complete);
        return;
out_failed_clear:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
}
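
/*
 * 'args' lives on nfs4_recover_state()'s stack: the completion
 * handshake guarantees that the reclaimer thread has copied the
 * pointer and taken its own reference to the client before this stack
 * frame is allowed to unwind.
 */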

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
        if (!clp)
                return;
        if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
                schedule_work(&clp->cl_recoverd);
}
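
/*
 * test_and_clear_bit() makes scheduling idempotent: NFS4CLNT_OK is only
 * set while the state is usable, so concurrent callers queue at most
 * one run of cl_recoverd; the reclaimer sets the bit again once
 * recovery finishes.
 */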

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /* kill_proc(fl->fl_owner, SIGLOST, 1); */
                                break;
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                list_for_each_entry(lock, &state->lock_states, ls_locks)
                        lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
                                goto out_err;
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                        printk("%s: Lock reclaim failed!\n",
                                                        __FUNCTION__);
                        }
                        continue;
                }
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
                        case -ENOENT:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /*
                                 * Open state on this file cannot be recovered.
                                 * All we can do is revert to using the zero stateid.
                                 */
                                memset(state->stateid.data, 0,
                                        sizeof(state->stateid.data));
                                /* Mark the file as being 'closed' */
                                state->state = 0;
                                break;
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}
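
/*
 * The NFS_LOCK_INITIALIZED dance above works as follows: the flag is
 * cleared on every lock state before the locks are reclaimed, and
 * ops->recover_lock() is expected to set it again on each lock owner it
 * successfully re-establishes.  Any lock state still missing the flag
 * afterwards was not reclaimed, which is what the warning reports.
 */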

static int reclaimer(void *ptr)
{
        struct reclaimer_args *args = (struct reclaimer_args *)ptr;
        struct nfs4_client *clp = args->clp;
        struct nfs4_state_owner *sp;
        struct nfs4_state_recovery_ops *ops;
        int status = 0;

        daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
        allow_signal(SIGKILL);

        atomic_inc(&clp->cl_count);
        complete(&args->complete);

        /* Ensure exclusive access to NFSv4 state */
        lock_kernel();
        down_write(&clp->cl_sem);
        /* Are there any NFS mounts out there? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
restart_loop:
        status = nfs4_proc_renew(clp);
        switch (status) {
                case 0:
                case -NFS4ERR_CB_PATH_DOWN:
                        goto out;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        ops = &nfs4_reboot_recovery_ops;
                        break;
                default:
                        ops = &nfs4_network_partition_recovery_ops;
        }
        status = __nfs4_init_client(clp);
        if (status)
                goto out_error;
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        /* Note: list is protected by exclusive lock on clp->cl_sem */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                status = nfs4_reclaim_open_state(ops, sp);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_reclaim_open_state(ops, sp);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                goto restart_loop;
                        if (status == -NFS4ERR_EXPIRED)
                                goto restart_loop;
                }
        }
        nfs_delegation_reap_unclaimed(clp);
out:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        up_write(&clp->cl_sem);
        unlock_kernel();
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_put_client(clp);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
                        NIPQUAD(clp->cl_addr.s_addr), -status);
        goto out;
}
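
/*
 * Recovery sequence, roughly: probe the lease with RENEW first.  If it
 * is intact (or only the callback path is down) there is nothing to
 * reclaim.  On NFS4ERR_STALE_CLIENTID or NFS4ERR_LEASE_MOVED we treat
 * this like a server reboot and reclaim state under the grace period
 * via nfs4_reboot_recovery_ops; any other failure is handled as a
 * network partition, re-establishing state from scratch.  In both
 * cases a fresh SETCLIENTID handshake (__nfs4_init_client) precedes
 * the per-owner reclaim pass.
 */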

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */