1 /*
2 * fs/nfs/nfs4state.c
3 *
4 * Client-side XDR for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Implementation of the NFSv4 state model: open and lock state
37 * management, sequence-id handling, lease renewal scheduling, and
38 * the asynchronous state manager used for recovery.
39 */
40
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/smp_lock.h>
44 #include <linux/nfs_fs.h>
45 #include <linux/nfs_idmap.h>
46 #include <linux/kthread.h>
47 #include <linux/module.h>
48 #include <linux/random.h>
49 #include <linux/workqueue.h>
50 #include <linux/bitops.h>
51
52 #include "nfs4_fs.h"
53 #include "callback.h"
54 #include "delegation.h"
55 #include "internal.h"
56
57 #define OPENOWNER_POOL_SIZE 8
58
59 const nfs4_stateid zero_stateid;
60
61 static LIST_HEAD(nfs4_clientid_list);
62
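/*
 * nfs4_init_clientid - establish a fresh clientid with an NFSv4.0 server
 *
 * Implements the two-step SETCLIENTID / SETCLIENTID_CONFIRM handshake:
 * the client advertises its callback port (IPv4 or IPv6), records the
 * clientid the server hands back and schedules lease renewal.  Returns 0
 * on success or a negative error from the SETCLIENTID calls.
 */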
63 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
64 {
65 struct nfs4_setclientid_res clid;
66 unsigned short port;
67 int status;
68
69 port = nfs_callback_tcpport;
70 if (clp->cl_addr.ss_family == AF_INET6)
71 port = nfs_callback_tcpport6;
72
73 status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
74 if (status != 0)
75 goto out;
76 status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
77 if (status != 0)
78 goto out;
79 clp->cl_clientid = clid.clientid;
80 nfs4_schedule_state_renewal(clp);
81 out:
82 return status;
83 }
84
85 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
86 {
87 struct rpc_cred *cred = NULL;
88
89 if (clp->cl_machine_cred != NULL)
90 cred = get_rpccred(clp->cl_machine_cred);
91 return cred;
92 }
93
94 static void nfs4_clear_machine_cred(struct nfs_client *clp)
95 {
96 struct rpc_cred *cred;
97
98 spin_lock(&clp->cl_lock);
99 cred = clp->cl_machine_cred;
100 clp->cl_machine_cred = NULL;
101 spin_unlock(&clp->cl_lock);
102 if (cred != NULL)
103 put_rpccred(cred);
104 }
105
106 struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
107 {
108 struct nfs4_state_owner *sp;
109 struct rb_node *pos;
110 struct rpc_cred *cred = NULL;
111
112 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
113 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
114 if (list_empty(&sp->so_states))
115 continue;
116 cred = get_rpccred(sp->so_cred);
117 break;
118 }
119 return cred;
120 }
121
122 #if defined(CONFIG_NFS_V4_1)
123
124 static int nfs41_setup_state_renewal(struct nfs_client *clp)
125 {
126 int status;
127 struct nfs_fsinfo fsinfo;
128
129 status = nfs4_proc_get_lease_time(clp, &fsinfo);
130 if (status == 0) {
131 /* Update lease time and schedule renewal */
132 spin_lock(&clp->cl_lock);
133 clp->cl_lease_time = fsinfo.lease_time * HZ;
134 clp->cl_last_renewal = jiffies;
135 spin_unlock(&clp->cl_lock);
136
137 nfs4_schedule_state_renewal(clp);
138 }
139
140 return status;
141 }
142
143 static void nfs4_end_drain_session(struct nfs_client *clp)
144 {
145 struct nfs4_session *ses = clp->cl_session;
146 int max_slots;
147
148 if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
149 spin_lock(&ses->fc_slot_table.slot_tbl_lock);
150 max_slots = ses->fc_slot_table.max_slots;
151 while (max_slots--) {
152 struct rpc_task *task;
153
154 task = rpc_wake_up_next(&ses->fc_slot_table.
155 slot_tbl_waitq);
156 if (!task)
157 break;
158 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
159 }
160 spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
161 }
162 }
163
164 static int nfs4_begin_drain_session(struct nfs_client *clp)
165 {
166 struct nfs4_session *ses = clp->cl_session;
167 struct nfs4_slot_table *tbl = &ses->fc_slot_table;
168
169 spin_lock(&tbl->slot_tbl_lock);
170 set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
171 if (tbl->highest_used_slotid != -1) {
172 INIT_COMPLETION(ses->complete);
173 spin_unlock(&tbl->slot_tbl_lock);
174 return wait_for_completion_interruptible(&ses->complete);
175 }
176 spin_unlock(&tbl->slot_tbl_lock);
177 return 0;
178 }
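/*
 * Session draining (the two helpers above): before recovery operations
 * such as EXCHANGE_ID, CREATE_SESSION or DESTROY_SESSION, the state
 * manager sets NFS4CLNT_SESSION_DRAINING and, if any forechannel slots
 * are still in use (highest_used_slotid != -1), sleeps on ses->complete,
 * which the slot release path is expected to signal once the table has
 * emptied.  nfs4_end_drain_session() clears the bit again and wakes up
 * to max_slots queued tasks at RPC_PRIORITY_PRIVILEGED.
 */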
179
180 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
181 {
182 int status;
183
184 nfs4_begin_drain_session(clp);
185 status = nfs4_proc_exchange_id(clp, cred);
186 if (status != 0)
187 goto out;
188 status = nfs4_proc_create_session(clp);
189 if (status != 0)
190 goto out;
191 nfs41_setup_state_renewal(clp);
192 nfs_mark_client_ready(clp, NFS_CS_READY);
193 out:
194 return status;
195 }
196
197 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
198 {
199 struct rpc_cred *cred;
200
201 spin_lock(&clp->cl_lock);
202 cred = nfs4_get_machine_cred_locked(clp);
203 spin_unlock(&clp->cl_lock);
204 return cred;
205 }
206
207 #endif /* CONFIG_NFS_V4_1 */
208
209 struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
210 {
211 struct nfs4_state_owner *sp;
212 struct rb_node *pos;
213 struct rpc_cred *cred;
214
215 spin_lock(&clp->cl_lock);
216 cred = nfs4_get_machine_cred_locked(clp);
217 if (cred != NULL)
218 goto out;
219 pos = rb_first(&clp->cl_state_owners);
220 if (pos != NULL) {
221 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
222 cred = get_rpccred(sp->so_cred);
223 }
224 out:
225 spin_unlock(&clp->cl_lock);
226 return cred;
227 }
228
229 static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
230 __u64 minval, int maxbits)
231 {
232 struct rb_node **p, *parent;
233 struct nfs_unique_id *pos;
234 __u64 mask = ~0ULL;
235
236 if (maxbits < 64)
237 mask = (1ULL << maxbits) - 1ULL;
238
239 /* Ensure distribution is more or less flat */
240 get_random_bytes(&new->id, sizeof(new->id));
241 new->id &= mask;
242 if (new->id < minval)
243 new->id += minval;
244 retry:
245 p = &root->rb_node;
246 parent = NULL;
247
248 while (*p != NULL) {
249 parent = *p;
250 pos = rb_entry(parent, struct nfs_unique_id, rb_node);
251
252 if (new->id < pos->id)
253 p = &(*p)->rb_left;
254 else if (new->id > pos->id)
255 p = &(*p)->rb_right;
256 else
257 goto id_exists;
258 }
259 rb_link_node(&new->rb_node, parent, p);
260 rb_insert_color(&new->rb_node, root);
261 return;
262 id_exists:
263 for (;;) {
264 new->id++;
265 if (new->id < minval || (new->id & mask) != new->id) {
266 new->id = minval;
267 break;
268 }
269 parent = rb_next(parent);
270 if (parent == NULL)
271 break;
272 pos = rb_entry(parent, struct nfs_unique_id, rb_node);
273 if (new->id < pos->id)
274 break;
275 }
276 goto retry;
277 }
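/*
 * The allocator above keeps ids unique within a single rb-tree (open-owner
 * ids live in clp->cl_openowner_id, lock-owner ids in clp->cl_lockowner_id).
 * It picks a random starting id, masked to maxbits and bumped up to at
 * least minval, and on a collision probes linearly past the existing
 * entries, wrapping back to minval once the id would overflow the mask,
 * before retrying the insert.  For illustration (hypothetical parameters,
 * the callers in this file use maxbits = 64): with maxbits = 8 and
 * minval = 1, a collision at id 255 wraps the search back to 1.
 */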
278
279 static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
280 {
281 rb_erase(&id->rb_node, root);
282 }
283
284 static struct nfs4_state_owner *
285 nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
286 {
287 struct nfs_client *clp = server->nfs_client;
288 struct rb_node **p = &clp->cl_state_owners.rb_node,
289 *parent = NULL;
290 struct nfs4_state_owner *sp, *res = NULL;
291
292 while (*p != NULL) {
293 parent = *p;
294 sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
295
296 if (server < sp->so_server) {
297 p = &parent->rb_left;
298 continue;
299 }
300 if (server > sp->so_server) {
301 p = &parent->rb_right;
302 continue;
303 }
304 if (cred < sp->so_cred)
305 p = &parent->rb_left;
306 else if (cred > sp->so_cred)
307 p = &parent->rb_right;
308 else {
309 atomic_inc(&sp->so_count);
310 res = sp;
311 break;
312 }
313 }
314 return res;
315 }
316
317 static struct nfs4_state_owner *
318 nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
319 {
320 struct rb_node **p = &clp->cl_state_owners.rb_node,
321 *parent = NULL;
322 struct nfs4_state_owner *sp;
323
324 while (*p != NULL) {
325 parent = *p;
326 sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
327
328 if (new->so_server < sp->so_server) {
329 p = &parent->rb_left;
330 continue;
331 }
332 if (new->so_server > sp->so_server) {
333 p = &parent->rb_right;
334 continue;
335 }
336 if (new->so_cred < sp->so_cred)
337 p = &parent->rb_left;
338 else if (new->so_cred > sp->so_cred)
339 p = &parent->rb_right;
340 else {
341 atomic_inc(&sp->so_count);
342 return sp;
343 }
344 }
345 nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
346 rb_link_node(&new->so_client_node, parent, p);
347 rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
348 return new;
349 }
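/*
 * cl_state_owners is a single rb-tree per nfs_client, keyed by pointer
 * comparison on (so_server, so_cred).  nfs4_find_state_owner() returns a
 * referenced owner (so_count bumped) or NULL; nfs4_insert_state_owner()
 * either links the new owner, assigning it an open-owner id, or, if an
 * equivalent owner got there first, takes a reference on the existing
 * one and returns that instead.  Both expect clp->cl_lock to be held.
 */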
350
351 static void
352 nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
353 {
354 if (!RB_EMPTY_NODE(&sp->so_client_node))
355 rb_erase(&sp->so_client_node, &clp->cl_state_owners);
356 nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
357 }
358
359 /*
360 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
361 * create a new state_owner.
362 *
363 */
364 static struct nfs4_state_owner *
365 nfs4_alloc_state_owner(void)
366 {
367 struct nfs4_state_owner *sp;
368
369 sp = kzalloc(sizeof(*sp), GFP_NOFS);
370 if (!sp)
371 return NULL;
372 spin_lock_init(&sp->so_lock);
373 INIT_LIST_HEAD(&sp->so_states);
374 INIT_LIST_HEAD(&sp->so_delegations);
375 rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
376 sp->so_seqid.sequence = &sp->so_sequence;
377 spin_lock_init(&sp->so_sequence.lock);
378 INIT_LIST_HEAD(&sp->so_sequence.list);
379 atomic_set(&sp->so_count, 1);
380 return sp;
381 }
382
383 static void
384 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
385 {
386 if (!RB_EMPTY_NODE(&sp->so_client_node)) {
387 struct nfs_client *clp = sp->so_client;
388
389 spin_lock(&clp->cl_lock);
390 rb_erase(&sp->so_client_node, &clp->cl_state_owners);
391 RB_CLEAR_NODE(&sp->so_client_node);
392 spin_unlock(&clp->cl_lock);
393 }
394 }
395
396 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
397 {
398 struct nfs_client *clp = server->nfs_client;
399 struct nfs4_state_owner *sp, *new;
400
401 spin_lock(&clp->cl_lock);
402 sp = nfs4_find_state_owner(server, cred);
403 spin_unlock(&clp->cl_lock);
404 if (sp != NULL)
405 return sp;
406 new = nfs4_alloc_state_owner();
407 if (new == NULL)
408 return NULL;
409 new->so_client = clp;
410 new->so_server = server;
411 new->so_cred = cred;
412 spin_lock(&clp->cl_lock);
413 sp = nfs4_insert_state_owner(clp, new);
414 spin_unlock(&clp->cl_lock);
415 if (sp == new)
416 get_rpccred(cred);
417 else {
418 rpc_destroy_wait_queue(&new->so_sequence.wait);
419 kfree(new);
420 }
421 return sp;
422 }
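/*
 * nfs4_get_state_owner() uses the usual optimistic allocate-then-insert
 * pattern: look up under cl_lock, drop the lock to allocate, then let
 * nfs4_insert_state_owner() resolve any race - if an equivalent owner
 * appeared in the meantime, the freshly allocated one is destroyed and
 * the winner returned.  A rough usage sketch (error handling trimmed):
 *
 *	struct nfs4_state_owner *sp = nfs4_get_state_owner(server, cred);
 *	if (sp == NULL)
 *		return -ENOMEM;
 *	... use sp->so_seqid, open some state ...
 *	nfs4_put_state_owner(sp);
 */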
423
424 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
425 {
426 struct nfs_client *clp = sp->so_client;
427 struct rpc_cred *cred = sp->so_cred;
428
429 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
430 return;
431 nfs4_remove_state_owner(clp, sp);
432 spin_unlock(&clp->cl_lock);
433 rpc_destroy_wait_queue(&sp->so_sequence.wait);
434 put_rpccred(cred);
435 kfree(sp);
436 }
437
438 static struct nfs4_state *
439 nfs4_alloc_open_state(void)
440 {
441 struct nfs4_state *state;
442
443 state = kzalloc(sizeof(*state), GFP_NOFS);
444 if (!state)
445 return NULL;
446 atomic_set(&state->count, 1);
447 INIT_LIST_HEAD(&state->lock_states);
448 spin_lock_init(&state->state_lock);
449 seqlock_init(&state->seqlock);
450 return state;
451 }
452
453 void
454 nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
455 {
456 if (state->state == fmode)
457 return;
458 /* NB! List reordering - see the reclaim code for why. */
459 if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
460 if (fmode & FMODE_WRITE)
461 list_move(&state->open_states, &state->owner->so_states);
462 else
463 list_move_tail(&state->open_states, &state->owner->so_states);
464 }
465 state->state = fmode;
466 }
467
468 static struct nfs4_state *
469 __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
470 {
471 struct nfs_inode *nfsi = NFS_I(inode);
472 struct nfs4_state *state;
473
474 list_for_each_entry(state, &nfsi->open_states, inode_states) {
475 if (state->owner != owner)
476 continue;
477 if (atomic_inc_not_zero(&state->count))
478 return state;
479 }
480 return NULL;
481 }
482
483 static void
484 nfs4_free_open_state(struct nfs4_state *state)
485 {
486 kfree(state);
487 }
488
489 struct nfs4_state *
490 nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
491 {
492 struct nfs4_state *state, *new;
493 struct nfs_inode *nfsi = NFS_I(inode);
494
495 spin_lock(&inode->i_lock);
496 state = __nfs4_find_state_byowner(inode, owner);
497 spin_unlock(&inode->i_lock);
498 if (state)
499 goto out;
500 new = nfs4_alloc_open_state();
501 spin_lock(&owner->so_lock);
502 spin_lock(&inode->i_lock);
503 state = __nfs4_find_state_byowner(inode, owner);
504 if (state == NULL && new != NULL) {
505 state = new;
506 state->owner = owner;
507 atomic_inc(&owner->so_count);
508 list_add(&state->inode_states, &nfsi->open_states);
509 state->inode = igrab(inode);
510 spin_unlock(&inode->i_lock);
511 /* Note: The reclaim code dictates that we add stateless
512 * and read-only stateids to the end of the list */
513 list_add_tail(&state->open_states, &owner->so_states);
514 spin_unlock(&owner->so_lock);
515 } else {
516 spin_unlock(&inode->i_lock);
517 spin_unlock(&owner->so_lock);
518 if (new)
519 nfs4_free_open_state(new);
520 }
521 out:
522 return state;
523 }
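/*
 * nfs4_get_open_state() follows the same double-checked pattern as
 * nfs4_get_state_owner(), but under two locks taken in the order
 * owner->so_lock then inode->i_lock.  A freshly created nfs4_state is
 * linked on both the inode's open_states list and - at the tail, per
 * the reclaim-ordering rule noted above - the owner's so_states list,
 * and it pins the inode via igrab() until nfs4_put_open_state().
 */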
524
525 void nfs4_put_open_state(struct nfs4_state *state)
526 {
527 struct inode *inode = state->inode;
528 struct nfs4_state_owner *owner = state->owner;
529
530 if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
531 return;
532 spin_lock(&inode->i_lock);
533 list_del(&state->inode_states);
534 list_del(&state->open_states);
535 spin_unlock(&inode->i_lock);
536 spin_unlock(&owner->so_lock);
537 iput(inode);
538 nfs4_free_open_state(state);
539 nfs4_put_state_owner(owner);
540 }
541
542 /*
543 * Close the current file.
544 */
545 static void __nfs4_close(struct path *path, struct nfs4_state *state,
546 fmode_t fmode, gfp_t gfp_mask, int wait)
547 {
548 struct nfs4_state_owner *owner = state->owner;
549 int call_close = 0;
550 fmode_t newstate;
551
552 atomic_inc(&owner->so_count);
553 /* Protect against nfs4_find_state() */
554 spin_lock(&owner->so_lock);
555 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
556 case FMODE_READ:
557 state->n_rdonly--;
558 break;
559 case FMODE_WRITE:
560 state->n_wronly--;
561 break;
562 case FMODE_READ|FMODE_WRITE:
563 state->n_rdwr--;
564 }
565 newstate = FMODE_READ|FMODE_WRITE;
566 if (state->n_rdwr == 0) {
567 if (state->n_rdonly == 0) {
568 newstate &= ~FMODE_READ;
569 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
570 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
571 }
572 if (state->n_wronly == 0) {
573 newstate &= ~FMODE_WRITE;
574 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
575 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
576 }
577 if (newstate == 0)
578 clear_bit(NFS_DELEGATED_STATE, &state->flags);
579 }
580 nfs4_state_set_mode_locked(state, newstate);
581 spin_unlock(&owner->so_lock);
582
583 if (!call_close) {
584 nfs4_put_open_state(state);
585 nfs4_put_state_owner(owner);
586 } else
587 nfs4_do_close(path, state, gfp_mask, wait);
588 }
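/*
 * Bookkeeping in __nfs4_close(): n_rdonly, n_wronly and n_rdwr count how
 * many local opens of each mode currently share this nfs4_state.  The
 * counter for the mode being closed is dropped and the remaining open
 * mode recomputed; an over-the-wire CLOSE or downgrade (handled by
 * nfs4_do_close()) is only needed when a mode for which we hold a
 * stateid (an NFS_O_*_STATE bit) is no longer in use.  Otherwise the
 * references are simply released locally.
 */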
589
590 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
591 {
592 __nfs4_close(path, state, fmode, GFP_NOFS, 0);
593 }
594
595 void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
596 {
597 __nfs4_close(path, state, fmode, GFP_KERNEL, 1);
598 }
599
600 /*
601 * Search the state->lock_states for an existing lock_owner
602 * that is compatible with current->files
603 */
604 static struct nfs4_lock_state *
605 __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
606 {
607 struct nfs4_lock_state *pos;
608 list_for_each_entry(pos, &state->lock_states, ls_locks) {
609 if (pos->ls_owner != fl_owner)
610 continue;
611 atomic_inc(&pos->ls_count);
612 return pos;
613 }
614 return NULL;
615 }
616
617 /*
618 * Allocate and initialise a new lock_state for the given lock owner,
619 * including its seqid wait queue and a fresh lockowner id drawn from
620 * clp->cl_lockowner_id.
621 */
622 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
623 {
624 struct nfs4_lock_state *lsp;
625 struct nfs_client *clp = state->owner->so_client;
626
627 lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
628 if (lsp == NULL)
629 return NULL;
630 rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
631 spin_lock_init(&lsp->ls_sequence.lock);
632 INIT_LIST_HEAD(&lsp->ls_sequence.list);
633 lsp->ls_seqid.sequence = &lsp->ls_sequence;
634 atomic_set(&lsp->ls_count, 1);
635 lsp->ls_state = state;
636 lsp->ls_owner = fl_owner;
637 spin_lock(&clp->cl_lock);
638 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
639 spin_unlock(&clp->cl_lock);
640 INIT_LIST_HEAD(&lsp->ls_locks);
641 return lsp;
642 }
643
644 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
645 {
646 struct nfs_client *clp = lsp->ls_state->owner->so_client;
647
648 spin_lock(&clp->cl_lock);
649 nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
650 spin_unlock(&clp->cl_lock);
651 rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
652 kfree(lsp);
653 }
654
655 /*
656 * Return a compatible lock_state. If no initialized lock_state structure
657 * exists, return an uninitialized one.
658 *
659 */
660 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
661 {
662 struct nfs4_lock_state *lsp, *new = NULL;
663
664 for(;;) {
665 spin_lock(&state->state_lock);
666 lsp = __nfs4_find_lock_state(state, owner);
667 if (lsp != NULL)
668 break;
669 if (new != NULL) {
670 list_add(&new->ls_locks, &state->lock_states);
671 set_bit(LK_STATE_IN_USE, &state->flags);
672 lsp = new;
673 new = NULL;
674 break;
675 }
676 spin_unlock(&state->state_lock);
677 new = nfs4_alloc_lock_state(state, owner);
678 if (new == NULL)
679 return NULL;
680 }
681 spin_unlock(&state->state_lock);
682 if (new != NULL)
683 nfs4_free_lock_state(new);
684 return lsp;
685 }
686
687 /*
688 * Release reference to lock_state, and free it if we see that
689 * it is no longer in use
690 */
691 void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
692 {
693 struct nfs4_state *state;
694
695 if (lsp == NULL)
696 return;
697 state = lsp->ls_state;
698 if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
699 return;
700 list_del(&lsp->ls_locks);
701 if (list_empty(&state->lock_states))
702 clear_bit(LK_STATE_IN_USE, &state->flags);
703 spin_unlock(&state->state_lock);
704 nfs4_free_lock_state(lsp);
705 }
706
707 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
708 {
709 struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
710
711 dst->fl_u.nfs4_fl.owner = lsp;
712 atomic_inc(&lsp->ls_count);
713 }
714
715 static void nfs4_fl_release_lock(struct file_lock *fl)
716 {
717 nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
718 }
719
720 static const struct file_lock_operations nfs4_fl_lock_ops = {
721 .fl_copy_lock = nfs4_fl_copy_lock,
722 .fl_release_private = nfs4_fl_release_lock,
723 };
724
725 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
726 {
727 struct nfs4_lock_state *lsp;
728
729 if (fl->fl_ops != NULL)
730 return 0;
731 lsp = nfs4_get_lock_state(state, fl->fl_owner);
732 if (lsp == NULL)
733 return -ENOMEM;
734 fl->fl_u.nfs4_fl.owner = lsp;
735 fl->fl_ops = &nfs4_fl_lock_ops;
736 return 0;
737 }
738
739 /*
740 * Byte-range lock aware utility to initialize the stateid of read/write
741 * requests.
742 */
743 void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
744 {
745 struct nfs4_lock_state *lsp;
746 int seq;
747
748 do {
749 seq = read_seqbegin(&state->seqlock);
750 memcpy(dst, &state->stateid, sizeof(*dst));
751 } while (read_seqretry(&state->seqlock, seq));
752 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
753 return;
754
755 spin_lock(&state->state_lock);
756 lsp = __nfs4_find_lock_state(state, fl_owner);
757 if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
758 memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
759 spin_unlock(&state->state_lock);
760 nfs4_put_lock_state(lsp);
761 }
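/*
 * nfs4_copy_stateid() selects the stateid a READ or WRITE should carry:
 * the open stateid is copied under state->seqlock, retrying if an update
 * raced with the copy, and then, if byte-range locks exist on the file,
 * an initialized lock stateid belonging to fl_owner is preferred.
 */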
762
763 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
764 {
765 struct nfs_seqid *new;
766
767 new = kmalloc(sizeof(*new), gfp_mask);
768 if (new != NULL) {
769 new->sequence = counter;
770 INIT_LIST_HEAD(&new->list);
771 }
772 return new;
773 }
774
775 void nfs_release_seqid(struct nfs_seqid *seqid)
776 {
777 if (!list_empty(&seqid->list)) {
778 struct rpc_sequence *sequence = seqid->sequence->sequence;
779
780 spin_lock(&sequence->lock);
781 list_del_init(&seqid->list);
782 spin_unlock(&sequence->lock);
783 rpc_wake_up(&sequence->wait);
784 }
785 }
786
787 void nfs_free_seqid(struct nfs_seqid *seqid)
788 {
789 nfs_release_seqid(seqid);
790 kfree(seqid);
791 }
792
793 /*
794 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
795 * failed with a seqid incrementing error -
796 * see the comments for seqid_mutating_error() in nfs_fs.h
797 */
798 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
799 {
800 BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
801 switch (status) {
802 case 0:
803 break;
804 case -NFS4ERR_BAD_SEQID:
805 if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
806 return;
807 printk(KERN_WARNING "NFS: v4 server returned a bad"
808 " sequence-id error on an"
809 " unconfirmed sequence %p!\n",
810 seqid->sequence);
811 case -NFS4ERR_STALE_CLIENTID:
812 case -NFS4ERR_STALE_STATEID:
813 case -NFS4ERR_BAD_STATEID:
814 case -NFS4ERR_BADXDR:
815 case -NFS4ERR_RESOURCE:
816 case -NFS4ERR_NOFILEHANDLE:
817 /* Non-seqid mutating errors */
818 return;
819 };
820 /*
821 * Note: no locking needed as we are guaranteed to be first
822 * on the sequence list
823 */
824 seqid->sequence->counter++;
825 }
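/*
 * NFSv4.0 open-owners and lock-owners carry a sequence number that must
 * advance by exactly one for each state-mutating operation the server
 * processes.  The counter is therefore only bumped on success or on an
 * error the server still counts as "seen"; the errors listed above
 * (including BAD_SEQID, where the unconfirmed case falls through after
 * the warning) leave it untouched so that client and server stay in
 * step.  See also seqid_mutating_error() in nfs_fs.h.
 */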
826
827 void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
828 {
829 struct nfs4_state_owner *sp = container_of(seqid->sequence,
830 struct nfs4_state_owner, so_seqid);
831 struct nfs_server *server = sp->so_server;
832
833 if (status == -NFS4ERR_BAD_SEQID)
834 nfs4_drop_state_owner(sp);
835 if (!nfs4_has_session(server->nfs_client))
836 nfs_increment_seqid(status, seqid);
837 }
838
839 /*
840 * Increment the seqid if the LOCK/LOCKU succeeded, or
841 * failed with a seqid incrementing error -
842 * see the comments for seqid_mutating_error() in nfs_fs.h
843 */
844 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
845 {
846 nfs_increment_seqid(status, seqid);
847 }
848
849 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
850 {
851 struct rpc_sequence *sequence = seqid->sequence->sequence;
852 int status = 0;
853
854 spin_lock(&sequence->lock);
855 if (list_empty(&seqid->list))
856 list_add_tail(&seqid->list, &sequence->list);
857 if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
858 goto unlock;
859 rpc_sleep_on(&sequence->wait, task, NULL);
860 status = -EAGAIN;
861 unlock:
862 spin_unlock(&sequence->lock);
863 return status;
864 }
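/*
 * Seqid-bearing requests for one owner are serialised via the
 * rpc_sequence list: nfs_wait_on_sequence() queues the nfs_seqid and
 * lets only the list head proceed (returning 0); every other task is
 * put to sleep on sequence->wait and gets -EAGAIN so the RPC can be
 * retried later.  nfs_release_seqid() unlinks the entry and wakes the
 * queued waiters so the new head can go ahead.  A caller typically
 * looks like (sketch):
 *
 *	if (nfs_wait_on_sequence(seqid, task) != 0)
 *		return;	/* task sleeps; it will be woken and retried */
 */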
865
866 static int nfs4_run_state_manager(void *);
867
868 static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
869 {
870 smp_mb__before_clear_bit();
871 clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
872 smp_mb__after_clear_bit();
873 wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
874 rpc_wake_up(&clp->cl_rpcwaitq);
875 }
876
877 /*
878 * Schedule the nfs_client asynchronous state management routine
879 */
880 void nfs4_schedule_state_manager(struct nfs_client *clp)
881 {
882 struct task_struct *task;
883
884 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
885 return;
886 __module_get(THIS_MODULE);
887 atomic_inc(&clp->cl_count);
888 task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
889 rpc_peeraddr2str(clp->cl_rpcclient,
890 RPC_DISPLAY_ADDR));
891 if (!IS_ERR(task))
892 return;
893 nfs4_clear_state_manager_bit(clp);
894 nfs_put_client(clp);
895 module_put(THIS_MODULE);
896 }
897
898 /*
899 * Schedule a state recovery attempt
900 */
901 void nfs4_schedule_state_recovery(struct nfs_client *clp)
902 {
903 if (!clp)
904 return;
905 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
906 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
907 nfs4_schedule_state_manager(clp);
908 }
909
910 int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
911 {
912
913 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
914 /* Don't recover state that expired before the reboot */
915 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
916 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
917 return 0;
918 }
919 set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
920 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
921 return 1;
922 }
923
924 int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
925 {
926 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
927 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
928 set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
929 set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
930 return 1;
931 }
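/*
 * Two flavours of recovery are flagged by the helpers above: "reboot"
 * reclaim re-establishes state within the server's grace period after a
 * server restart (typically via reclaim-type OPEN/LOCK requests), while
 * "nograce" reclaim recovers state by ordinary means, e.g. after a lease
 * expiry or when the server offers no grace period.  State that was
 * already marked for nograce recovery is skipped by the reboot pass,
 * since it had not survived to the reboot in the first place.
 */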
932
933 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
934 {
935 struct inode *inode = state->inode;
936 struct nfs_inode *nfsi = NFS_I(inode);
937 struct file_lock *fl;
938 int status = 0;
939
940 if (inode->i_flock == NULL)
941 return 0;
942
943 /* Guard against delegation returns and new lock/unlock calls */
944 down_write(&nfsi->rwsem);
945 /* Protect inode->i_flock using the BKL */
946 lock_kernel();
947 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
948 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
949 continue;
950 if (nfs_file_open_context(fl->fl_file)->state != state)
951 continue;
952 unlock_kernel();
953 status = ops->recover_lock(state, fl);
954 switch (status) {
955 case 0:
956 break;
957 case -ESTALE:
958 case -NFS4ERR_ADMIN_REVOKED:
959 case -NFS4ERR_STALE_STATEID:
960 case -NFS4ERR_BAD_STATEID:
961 case -NFS4ERR_EXPIRED:
962 case -NFS4ERR_NO_GRACE:
963 case -NFS4ERR_STALE_CLIENTID:
964 case -NFS4ERR_BADSESSION:
965 case -NFS4ERR_BADSLOT:
966 case -NFS4ERR_BAD_HIGH_SLOT:
967 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
968 goto out;
969 default:
970 printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
971 __func__, status);
972 case -ENOMEM:
973 case -NFS4ERR_DENIED:
974 case -NFS4ERR_RECLAIM_BAD:
975 case -NFS4ERR_RECLAIM_CONFLICT:
976 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
977 status = 0;
978 }
979 lock_kernel();
980 }
981 unlock_kernel();
982 out:
983 up_write(&nfsi->rwsem);
984 return status;
985 }
986
987 static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
988 {
989 struct nfs4_state *state;
990 struct nfs4_lock_state *lock;
991 int status = 0;
992
993 /* Note: we rely on the sp->so_states list being ordered
994 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
995 * states first.
996 * This is needed to ensure that the server won't give us any
997 * read delegations that we have to return if, say, we are
998 * recovering after a network partition or a reboot from a
999 * server that doesn't support a grace period.
1000 */
1001 restart:
1002 spin_lock(&sp->so_lock);
1003 list_for_each_entry(state, &sp->so_states, open_states) {
1004 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
1005 continue;
1006 if (state->state == 0)
1007 continue;
1008 atomic_inc(&state->count);
1009 spin_unlock(&sp->so_lock);
1010 status = ops->recover_open(sp, state);
1011 if (status >= 0) {
1012 status = nfs4_reclaim_locks(state, ops);
1013 if (status >= 0) {
1014 list_for_each_entry(lock, &state->lock_states, ls_locks) {
1015 if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
1016 printk("%s: Lock reclaim failed!\n",
1017 __func__);
1018 }
1019 nfs4_put_open_state(state);
1020 goto restart;
1021 }
1022 }
1023 switch (status) {
1024 default:
1025 printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
1026 __func__, status);
1027 case -ENOENT:
1028 case -ENOMEM:
1029 case -ESTALE:
1030 /*
1031 * Open state on this file cannot be recovered
1032 * All we can do is revert to using the zero stateid.
1033 */
1034 memset(state->stateid.data, 0,
1035 sizeof(state->stateid.data));
1036 /* Mark the file as being 'closed' */
1037 state->state = 0;
1038 break;
1039 case -NFS4ERR_ADMIN_REVOKED:
1040 case -NFS4ERR_STALE_STATEID:
1041 case -NFS4ERR_BAD_STATEID:
1042 case -NFS4ERR_RECLAIM_BAD:
1043 case -NFS4ERR_RECLAIM_CONFLICT:
1044 nfs4_state_mark_reclaim_nograce(sp->so_client, state);
1045 break;
1046 case -NFS4ERR_EXPIRED:
1047 case -NFS4ERR_NO_GRACE:
1048 nfs4_state_mark_reclaim_nograce(sp->so_client, state);
1049 case -NFS4ERR_STALE_CLIENTID:
1050 case -NFS4ERR_BADSESSION:
1051 case -NFS4ERR_BADSLOT:
1052 case -NFS4ERR_BAD_HIGH_SLOT:
1053 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1054 goto out_err;
1055 }
1056 nfs4_put_open_state(state);
1057 goto restart;
1058 }
1059 spin_unlock(&sp->so_lock);
1060 return 0;
1061 out_err:
1062 nfs4_put_open_state(state);
1063 return status;
1064 }
1065
1066 static void nfs4_clear_open_state(struct nfs4_state *state)
1067 {
1068 struct nfs4_lock_state *lock;
1069
1070 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1071 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1072 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1073 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1074 list_for_each_entry(lock, &state->lock_states, ls_locks) {
1075 lock->ls_seqid.flags = 0;
1076 lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
1077 }
1078 }
1079
1080 static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1081 {
1082 struct nfs4_state_owner *sp;
1083 struct rb_node *pos;
1084 struct nfs4_state *state;
1085
1086 /* Reset all sequence ids to zero */
1087 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1088 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1089 sp->so_seqid.flags = 0;
1090 spin_lock(&sp->so_lock);
1091 list_for_each_entry(state, &sp->so_states, open_states) {
1092 if (mark_reclaim(clp, state))
1093 nfs4_clear_open_state(state);
1094 }
1095 spin_unlock(&sp->so_lock);
1096 }
1097 }
1098
1099 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
1100 {
1101 /* Mark all delegations for reclaim */
1102 nfs_delegation_mark_reclaim(clp);
1103 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
1104 }
1105
1106 static void nfs4_reclaim_complete(struct nfs_client *clp,
1107 const struct nfs4_state_recovery_ops *ops)
1108 {
1109 /* Notify the server we're done reclaiming our state */
1110 if (ops->reclaim_complete)
1111 (void)ops->reclaim_complete(clp);
1112 }
1113
1114 static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1115 {
1116 struct nfs4_state_owner *sp;
1117 struct rb_node *pos;
1118 struct nfs4_state *state;
1119
1120 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1121 return;
1122
1123 nfs4_reclaim_complete(clp,
1124 nfs4_reboot_recovery_ops[clp->cl_minorversion]);
1125
1126 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1127 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1128 spin_lock(&sp->so_lock);
1129 list_for_each_entry(state, &sp->so_states, open_states) {
1130 if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
1131 continue;
1132 nfs4_state_mark_reclaim_nograce(clp, state);
1133 }
1134 spin_unlock(&sp->so_lock);
1135 }
1136
1137 nfs_delegation_reap_unclaimed(clp);
1138 }
1139
1140 static void nfs_delegation_clear_all(struct nfs_client *clp)
1141 {
1142 nfs_delegation_mark_reclaim(clp);
1143 nfs_delegation_reap_unclaimed(clp);
1144 }
1145
1146 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
1147 {
1148 nfs_delegation_clear_all(clp);
1149 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
1150 }
1151
1152 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1153 {
1154 switch (error) {
1155 case -NFS4ERR_CB_PATH_DOWN:
1156 nfs_handle_cb_pathdown(clp);
1157 return 0;
1158 case -NFS4ERR_NO_GRACE:
1159 nfs4_state_end_reclaim_reboot(clp);
1160 return 0;
1161 case -NFS4ERR_STALE_CLIENTID:
1162 case -NFS4ERR_LEASE_MOVED:
1163 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1164 nfs4_state_end_reclaim_reboot(clp);
1165 nfs4_state_start_reclaim_reboot(clp);
1166 break;
1167 case -NFS4ERR_EXPIRED:
1168 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1169 nfs4_state_start_reclaim_nograce(clp);
1170 break;
1171 case -NFS4ERR_BADSESSION:
1172 case -NFS4ERR_BADSLOT:
1173 case -NFS4ERR_BAD_HIGH_SLOT:
1174 case -NFS4ERR_DEADSESSION:
1175 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1176 case -NFS4ERR_SEQ_FALSE_RETRY:
1177 case -NFS4ERR_SEQ_MISORDERED:
1178 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1179 /* Zero session reset errors */
1180 return 0;
1181 }
1182 return error;
1183 }
1184
1185 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
1186 {
1187 struct rb_node *pos;
1188 int status = 0;
1189
1190 restart:
1191 spin_lock(&clp->cl_lock);
1192 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1193 struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
1194 if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
1195 continue;
1196 atomic_inc(&sp->so_count);
1197 spin_unlock(&clp->cl_lock);
1198 status = nfs4_reclaim_open_state(sp, ops);
1199 if (status < 0) {
1200 set_bit(ops->owner_flag_bit, &sp->so_flags);
1201 nfs4_put_state_owner(sp);
1202 return nfs4_recovery_handle_error(clp, status);
1203 }
1204 nfs4_put_state_owner(sp);
1205 goto restart;
1206 }
1207 spin_unlock(&clp->cl_lock);
1208 return status;
1209 }
1210
1211 static int nfs4_check_lease(struct nfs_client *clp)
1212 {
1213 struct rpc_cred *cred;
1214 struct nfs4_state_maintenance_ops *ops =
1215 nfs4_state_renewal_ops[clp->cl_minorversion];
1216 int status = -NFS4ERR_EXPIRED;
1217
1218 /* Is the client already known to have an expired lease? */
1219 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1220 return 0;
1221 spin_lock(&clp->cl_lock);
1222 cred = ops->get_state_renewal_cred_locked(clp);
1223 spin_unlock(&clp->cl_lock);
1224 if (cred == NULL) {
1225 cred = nfs4_get_setclientid_cred(clp);
1226 if (cred == NULL)
1227 goto out;
1228 }
1229 status = ops->renew_lease(clp, cred);
1230 put_rpccred(cred);
1231 out:
1232 return nfs4_recovery_handle_error(clp, status);
1233 }
1234
1235 static int nfs4_reclaim_lease(struct nfs_client *clp)
1236 {
1237 struct rpc_cred *cred;
1238 struct nfs4_state_recovery_ops *ops =
1239 nfs4_reboot_recovery_ops[clp->cl_minorversion];
1240 int status = -ENOENT;
1241
1242 cred = ops->get_clid_cred(clp);
1243 if (cred != NULL) {
1244 status = ops->establish_clid(clp, cred);
1245 put_rpccred(cred);
1246 /* Handle case where the user hasn't set up machine creds */
1247 if (status == -EACCES && cred == clp->cl_machine_cred) {
1248 nfs4_clear_machine_cred(clp);
1249 status = -EAGAIN;
1250 }
1251 if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
1252 status = -EPROTONOSUPPORT;
1253 }
1254 return status;
1255 }
1256
1257 #ifdef CONFIG_NFS_V4_1
1258 void nfs41_handle_recall_slot(struct nfs_client *clp)
1259 {
1260 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1261 nfs4_schedule_state_recovery(clp);
1262 }
1263
1264 static void nfs4_reset_all_state(struct nfs_client *clp)
1265 {
1266 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1267 clp->cl_boot_time = CURRENT_TIME;
1268 nfs4_state_start_reclaim_nograce(clp);
1269 nfs4_schedule_state_recovery(clp);
1270 }
1271 }
1272
1273 static void nfs41_handle_server_reboot(struct nfs_client *clp)
1274 {
1275 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1276 nfs4_state_start_reclaim_reboot(clp);
1277 nfs4_schedule_state_recovery(clp);
1278 }
1279 }
1280
1281 static void nfs41_handle_state_revoked(struct nfs_client *clp)
1282 {
1283 /* Temporary */
1284 nfs4_reset_all_state(clp);
1285 }
1286
1287 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
1288 {
1289 /* This will need to handle layouts too */
1290 nfs_expire_all_delegations(clp);
1291 }
1292
1293 static void nfs41_handle_cb_path_down(struct nfs_client *clp)
1294 {
1295 nfs_expire_all_delegations(clp);
1296 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
1297 nfs4_schedule_state_recovery(clp);
1298 }
1299
1300 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
1301 {
1302 if (!flags)
1303 return;
1304 else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
1305 nfs41_handle_server_reboot(clp);
1306 else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
1307 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
1308 SEQ4_STATUS_ADMIN_STATE_REVOKED |
1309 SEQ4_STATUS_LEASE_MOVED))
1310 nfs41_handle_state_revoked(clp);
1311 else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
1312 nfs41_handle_recallable_state_revoked(clp);
1313 else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
1314 SEQ4_STATUS_BACKCHANNEL_FAULT |
1315 SEQ4_STATUS_CB_PATH_DOWN_SESSION))
1316 nfs41_handle_cb_path_down(clp);
1317 }
1318
1319 static int nfs4_reset_session(struct nfs_client *clp)
1320 {
1321 int status;
1322
1323 nfs4_begin_drain_session(clp);
1324 status = nfs4_proc_destroy_session(clp->cl_session);
1325 if (status && status != -NFS4ERR_BADSESSION &&
1326 status != -NFS4ERR_DEADSESSION) {
1327 status = nfs4_recovery_handle_error(clp, status);
1328 goto out;
1329 }
1330
1331 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
1332 status = nfs4_proc_create_session(clp);
1333 if (status) {
1334 status = nfs4_recovery_handle_error(clp, status);
1335 goto out;
1336 }
1337 /* create_session negotiated new slot table */
1338 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1339
1340 /* Let the state manager reestablish state */
1341 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1342 nfs41_setup_state_renewal(clp);
1343 out:
1344 return status;
1345 }
1346
1347 static int nfs4_recall_slot(struct nfs_client *clp)
1348 {
1349 struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
1350 struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
1351 struct nfs4_slot *new, *old;
1352 int i;
1353
1354 nfs4_begin_drain_session(clp);
1355 new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
1356 GFP_NOFS);
1357 if (!new)
1358 return -ENOMEM;
1359
1360 spin_lock(&fc_tbl->slot_tbl_lock);
1361 for (i = 0; i < fc_tbl->target_max_slots; i++)
1362 new[i].seq_nr = fc_tbl->slots[i].seq_nr;
1363 old = fc_tbl->slots;
1364 fc_tbl->slots = new;
1365 fc_tbl->max_slots = fc_tbl->target_max_slots;
1366 fc_tbl->target_max_slots = 0;
1367 fc_attrs->max_reqs = fc_tbl->max_slots;
1368 spin_unlock(&fc_tbl->slot_tbl_lock);
1369
1370 kfree(old);
1371 nfs4_end_drain_session(clp);
1372 return 0;
1373 }
1374
1375 #else /* CONFIG_NFS_V4_1 */
1376 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
1377 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
1378 static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
1379 #endif /* CONFIG_NFS_V4_1 */
1380
1381 /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
1382 * on EXCHANGE_ID for v4.1
1383 */
1384 static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
1385 {
1386 if (nfs4_has_session(clp)) {
1387 switch (status) {
1388 case -NFS4ERR_DELAY:
1389 case -NFS4ERR_CLID_INUSE:
1390 case -EAGAIN:
1391 case -EKEYEXPIRED:
1392 break;
1393
1394 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1395 * in nfs4_exchange_id */
1396 default:
1397 return;
1398 }
1399 }
1400 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1401 }
1402
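/*
 * nfs4_state_manager() is the core of asynchronous state recovery.  It
 * runs in its own kthread (see nfs4_schedule_state_manager()) and loops
 * over the NFS4CLNT_* flags in clp->cl_state, handling them in a fixed
 * order: re-establish the lease (clientid/session) if it has expired,
 * check the lease, reset the NFSv4.1 session, reclaim "reboot" state,
 * reclaim "nograce" state, end any session drain, return marked
 * delegations, and shrink the slot table if the server asked for it.
 * The loop exits once cl_state is clear, or when a newly scheduled
 * manager instance has taken over the remaining work.
 */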
1403 static void nfs4_state_manager(struct nfs_client *clp)
1404 {
1405 int status = 0;
1406
1407 /* Ensure exclusive access to NFSv4 state */
1408 for(;;) {
1409 if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
1410 /* We're going to have to re-establish a clientid */
1411 status = nfs4_reclaim_lease(clp);
1412 if (status) {
1413 nfs4_set_lease_expired(clp, status);
1414 if (test_bit(NFS4CLNT_LEASE_EXPIRED,
1415 &clp->cl_state))
1416 continue;
1417 if (clp->cl_cons_state ==
1418 NFS_CS_SESSION_INITING)
1419 nfs_mark_client_ready(clp, status);
1420 goto out_error;
1421 }
1422 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1423 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
1424 }
1425
1426 if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
1427 status = nfs4_check_lease(clp);
1428 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1429 continue;
1430 if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
1431 goto out_error;
1432 }
1433
1434 /* Initialize or reset the session */
1435 if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
1436 && nfs4_has_session(clp)) {
1437 status = nfs4_reset_session(clp);
1438 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1439 continue;
1440 if (status < 0)
1441 goto out_error;
1442 }
1443
1444 /* First recover reboot state... */
1445 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
1446 status = nfs4_do_reclaim(clp,
1447 nfs4_reboot_recovery_ops[clp->cl_minorversion]);
1448 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
1449 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
1450 continue;
1451 nfs4_state_end_reclaim_reboot(clp);
1452 if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
1453 continue;
1454 if (status < 0)
1455 goto out_error;
1456 }
1457
1458 /* Now recover expired state... */
1459 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
1460 status = nfs4_do_reclaim(clp,
1461 nfs4_nograce_recovery_ops[clp->cl_minorversion]);
1462 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
1463 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
1464 test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1465 continue;
1466 if (status < 0)
1467 goto out_error;
1468 }
1469
1470 nfs4_end_drain_session(clp);
1471 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
1472 nfs_client_return_marked_delegations(clp);
1473 continue;
1474 }
1475 /* Recall session slots */
1476 if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
1477 && nfs4_has_session(clp)) {
1478 status = nfs4_recall_slot(clp);
1479 if (status < 0)
1480 goto out_error;
1481 continue;
1482 }
1483
1484
1485 nfs4_clear_state_manager_bit(clp);
1486 /* Did we race with an attempt to give us more work? */
1487 if (clp->cl_state == 0)
1488 break;
1489 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
1490 break;
1491 }
1492 return;
1493 out_error:
1494 printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
1495 " with error %d\n", clp->cl_hostname, -status);
1496 nfs4_end_drain_session(clp);
1497 nfs4_clear_state_manager_bit(clp);
1498 }
1499
1500 static int nfs4_run_state_manager(void *ptr)
1501 {
1502 struct nfs_client *clp = ptr;
1503
1504 allow_signal(SIGKILL);
1505 nfs4_state_manager(clp);
1506 nfs_put_client(clp);
1507 module_put_and_exit(0);
1508 return 0;
1509 }
1510
1511 /*
1512 * Local variables:
1513 * c-basic-offset: 8
1514 * End:
1515 */