/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  Implementation of the NFSv4 state model.  For the time being,
 *  this is minimal, but will be made much more complex in a
 *  subsequent patch.
 */
40 | ||
41 | #include <linux/kernel.h> | |
42 | #include <linux/slab.h> | |
43 | #include <linux/smp_lock.h> | |
44 | #include <linux/nfs_fs.h> | |
45 | #include <linux/nfs_idmap.h> | |
46 | #include <linux/kthread.h> | |
47 | #include <linux/module.h> | |
48 | #include <linux/random.h> | |
49 | #include <linux/workqueue.h> | |
50 | #include <linux/bitops.h> | |
51 | ||
52 | #include "nfs4_fs.h" | |
53 | #include "callback.h" | |
54 | #include "delegation.h" | |
55 | #include "internal.h" | |
56 | ||
#define OPENOWNER_POOL_SIZE	8

/* All-zeros stateid; used where the protocol calls for the special stateid */
const nfs4_stateid zero_stateid;

/* All nfs_client structures that currently hold NFSv4 state */
static LIST_HEAD(nfs4_clientid_list);
62 | ||
63 | static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred) | |
64 | { | |
65 | unsigned short port; | |
66 | int status; | |
67 | ||
68 | port = nfs_callback_tcpport; | |
69 | if (clp->cl_addr.ss_family == AF_INET6) | |
70 | port = nfs_callback_tcpport6; | |
71 | ||
72 | status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred); | |
73 | if (status == 0) | |
74 | status = nfs4_proc_setclientid_confirm(clp, cred); | |
75 | if (status == 0) | |
76 | nfs4_schedule_state_renewal(clp); | |
77 | return status; | |
78 | } | |
79 | ||
80 | static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp) | |
81 | { | |
82 | struct rpc_cred *cred = NULL; | |
83 | ||
84 | if (clp->cl_machine_cred != NULL) | |
85 | cred = get_rpccred(clp->cl_machine_cred); | |
86 | return cred; | |
87 | } | |
88 | ||
89 | static void nfs4_clear_machine_cred(struct nfs_client *clp) | |
90 | { | |
91 | struct rpc_cred *cred; | |
92 | ||
93 | spin_lock(&clp->cl_lock); | |
94 | cred = clp->cl_machine_cred; | |
95 | clp->cl_machine_cred = NULL; | |
96 | spin_unlock(&clp->cl_lock); | |
97 | if (cred != NULL) | |
98 | put_rpccred(cred); | |
99 | } | |
100 | ||
101 | struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) | |
102 | { | |
103 | struct nfs4_state_owner *sp; | |
104 | struct rb_node *pos; | |
105 | struct rpc_cred *cred = NULL; | |
106 | ||
107 | for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { | |
108 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); | |
109 | if (list_empty(&sp->so_states)) | |
110 | continue; | |
111 | cred = get_rpccred(sp->so_cred); | |
112 | break; | |
113 | } | |
114 | return cred; | |
115 | } | |
116 | ||
117 | static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) | |
118 | { | |
119 | struct rpc_cred *cred; | |
120 | ||
121 | spin_lock(&clp->cl_lock); | |
122 | cred = nfs4_get_renew_cred_locked(clp); | |
123 | spin_unlock(&clp->cl_lock); | |
124 | return cred; | |
125 | } | |
126 | ||
127 | static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) | |
128 | { | |
129 | struct nfs4_state_owner *sp; | |
130 | struct rb_node *pos; | |
131 | struct rpc_cred *cred; | |
132 | ||
133 | spin_lock(&clp->cl_lock); | |
134 | cred = nfs4_get_machine_cred_locked(clp); | |
135 | if (cred != NULL) | |
136 | goto out; | |
137 | pos = rb_first(&clp->cl_state_owners); | |
138 | if (pos != NULL) { | |
139 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); | |
140 | cred = get_rpccred(sp->so_cred); | |
141 | } | |
142 | out: | |
143 | spin_unlock(&clp->cl_lock); | |
144 | return cred; | |
145 | } | |
146 | ||
/*
 * Allocate a unique 64-bit identifier for @new and insert it into @root.
 *
 * A starting value is drawn at random from [minval, 2^maxbits), then
 * linearly probed past any ids already present in the tree (wrapping
 * back to minval at the top of the range) until a free value is found.
 * Caller must provide the locking that protects @root.
 */
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
		__u64 minval, int maxbits)
{
	struct rb_node **p, *parent;
	struct nfs_unique_id *pos;
	__u64 mask = ~0ULL;

	if (maxbits < 64)
		mask = (1ULL << maxbits) - 1ULL;

	/* Ensure distribution is more or less flat */
	get_random_bytes(&new->id, sizeof(new->id));
	new->id &= mask;
	if (new->id < minval)
		new->id += minval;
retry:
	p = &root->rb_node;
	parent = NULL;

	/* Standard rbtree descent, keyed on ->id */
	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);

		if (new->id < pos->id)
			p = &(*p)->rb_left;
		else if (new->id > pos->id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return;
id_exists:
	/* Collision: skip past the run of consecutive ids in the tree */
	for (;;) {
		new->id++;
		if (new->id < minval || (new->id & mask) != new->id) {
			/* Wrapped past the top of the range; restart probe */
			new->id = minval;
			break;
		}
		parent = rb_next(parent);
		if (parent == NULL)
			break;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
		if (new->id < pos->id)
			break;
	}
	goto retry;
}
196 | ||
/* Return a previously allocated unique id to @root's pool. */
static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
	rb_erase(&id->rb_node, root);
}
201 | ||
/*
 * Look up the state owner for (@server, @cred) in the client's rbtree.
 * The tree is keyed on (server pointer, cred pointer); a hit takes a
 * reference on the owner.  Returns NULL on a miss.
 * Caller must hold clp->cl_lock.
 */
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp, *res = NULL;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		/* Primary sort key: server pointer value */
		if (server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		/* Secondary sort key: credential pointer value */
		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			res = sp;
			break;
		}
	}
	return res;
}
234 | ||
/*
 * Insert @new into the client's state owner rbtree, unless an owner
 * with the same (server, cred) key already exists - in that case take
 * a reference on the existing owner and return it instead.  On a real
 * insertion an open-owner id is allocated for @new.
 * Caller must hold clp->cl_lock.
 */
static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (new->so_server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (new->so_server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			/* Duplicate key: hand back the existing owner */
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
	rb_link_node(&new->so_client_node, parent, p);
	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
	return new;
}
268 | ||
/*
 * Unlink @sp from the client's state owner rbtree (if still linked)
 * and return its open-owner id to the pool.
 * Caller must hold clp->cl_lock.
 */
static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node))
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}
276 | ||
277 | /* | |
278 | * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to | |
279 | * create a new state_owner. | |
280 | * | |
281 | */ | |
282 | static struct nfs4_state_owner * | |
283 | nfs4_alloc_state_owner(void) | |
284 | { | |
285 | struct nfs4_state_owner *sp; | |
286 | ||
287 | sp = kzalloc(sizeof(*sp),GFP_KERNEL); | |
288 | if (!sp) | |
289 | return NULL; | |
290 | spin_lock_init(&sp->so_lock); | |
291 | INIT_LIST_HEAD(&sp->so_states); | |
292 | INIT_LIST_HEAD(&sp->so_delegations); | |
293 | rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); | |
294 | sp->so_seqid.sequence = &sp->so_sequence; | |
295 | spin_lock_init(&sp->so_sequence.lock); | |
296 | INIT_LIST_HEAD(&sp->so_sequence.list); | |
297 | atomic_set(&sp->so_count, 1); | |
298 | return sp; | |
299 | } | |
300 | ||
/*
 * Detach @sp from the client's rbtree so it can no longer be found by
 * nfs4_find_state_owner(); used when the server rejects the owner's
 * seqid.  The node is cleared so a later nfs4_remove_state_owner()
 * will not erase it a second time.
 */
static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	/* NOTE(review): RB_EMPTY_NODE is tested before cl_lock is taken
	 * and not re-checked under the lock - presumably drops are
	 * serialised by the caller; verify */
	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
		struct nfs_client *clp = sp->so_client;

		spin_lock(&clp->cl_lock);
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
		RB_CLEAR_NODE(&sp->so_client_node);
		spin_unlock(&clp->cl_lock);
	}
}
313 | ||
/*
 * Find or create a state owner for (@server, @cred).  A fast-path
 * lookup is tried first; on a miss a new owner is allocated and
 * inserted, with concurrent inserters resolved by
 * nfs4_insert_state_owner() - the loser's allocation is freed here.
 * Returns NULL on allocation failure.
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		return sp;
	new = nfs4_alloc_state_owner();
	if (new == NULL)
		return NULL;
	new->so_client = clp;
	new->so_server = server;
	new->so_cred = cred;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner(clp, new);
	spin_unlock(&clp->cl_lock);
	if (sp == new)
		get_rpccred(cred);	/* new owner holds a cred reference */
	else {
		/* Lost the insertion race: discard our allocation */
		rpc_destroy_wait_queue(&new->so_sequence.wait);
		kfree(new);
	}
	return sp;
}
341 | ||
/*
 * Drop a reference to @sp; on the final put, unlink it from the client
 * and free it, releasing the credential reference it held.
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	/* Returns with cl_lock held only when the count reaches zero */
	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	nfs4_remove_state_owner(clp, sp);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&sp->so_sequence.wait);
	put_rpccred(cred);
	kfree(sp);
}
355 | ||
356 | static struct nfs4_state * | |
357 | nfs4_alloc_open_state(void) | |
358 | { | |
359 | struct nfs4_state *state; | |
360 | ||
361 | state = kzalloc(sizeof(*state), GFP_KERNEL); | |
362 | if (!state) | |
363 | return NULL; | |
364 | atomic_set(&state->count, 1); | |
365 | INIT_LIST_HEAD(&state->lock_states); | |
366 | spin_lock_init(&state->state_lock); | |
367 | seqlock_init(&state->seqlock); | |
368 | return state; | |
369 | } | |
370 | ||
/*
 * Update the effective open share mode of @state.
 * Caller must hold the owner's so_lock.
 */
void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		/* Writable states go to the head of so_states so the
		 * reclaim code processes them first */
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}
385 | ||
386 | static struct nfs4_state * | |
387 | __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner) | |
388 | { | |
389 | struct nfs_inode *nfsi = NFS_I(inode); | |
390 | struct nfs4_state *state; | |
391 | ||
392 | list_for_each_entry(state, &nfsi->open_states, inode_states) { | |
393 | if (state->owner != owner) | |
394 | continue; | |
395 | if (atomic_inc_not_zero(&state->count)) | |
396 | return state; | |
397 | } | |
398 | return NULL; | |
399 | } | |
400 | ||
/* Free an nfs4_state; all references must already be gone. */
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
406 | ||
/*
 * Find or create the open state for (@inode, @owner).
 *
 * The allocation happens outside the locks, and the lookup is repeated
 * under both so_lock and i_lock to resolve races with other openers;
 * the loser's allocation is freed.  Returns NULL only when the lookup
 * misses and the allocation failed.
 */
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	/* Fast path: the state already exists */
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	/* Lock order: so_lock before i_lock */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		/* NOTE(review): igrab() can return NULL for a dying inode
		 * and that is not checked here - confirm callers always
		 * pass a live inode */
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
442 | ||
/*
 * Drop a reference to @state; on the final put, unlink it from the
 * inode and owner lists and free it, dropping the inode and owner
 * references it held.
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	/* Returns with so_lock held only when the count reaches zero */
	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}
459 | ||
/*
 * Close the current file.
 *
 * Decrements the per-mode open counter selected by @fmode, recomputes
 * the state's effective share mode, and performs an over-the-wire
 * CLOSE/OPEN_DOWNGRADE (via nfs4_do_close) only when a mode for which
 * the server holds state has dropped to zero.  @wait selects a
 * synchronous close.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			/* A close is needed if the server holds read or
			 * read/write state for us */
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		/* Nothing to send; just drop our references */
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(path, state, wait);
}
506 | ||
/* Asynchronous close of @state for open mode @fmode. */
void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, 0);
}
511 | ||
/* Synchronous close of @state for open mode @fmode. */
void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, 1);
}
516 | ||
517 | /* | |
518 | * Search the state->lock_states for an existing lock_owner | |
519 | * that is compatible with current->files | |
520 | */ | |
521 | static struct nfs4_lock_state * | |
522 | __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) | |
523 | { | |
524 | struct nfs4_lock_state *pos; | |
525 | list_for_each_entry(pos, &state->lock_states, ls_locks) { | |
526 | if (pos->ls_owner != fl_owner) | |
527 | continue; | |
528 | atomic_inc(&pos->ls_count); | |
529 | return pos; | |
530 | } | |
531 | return NULL; | |
532 | } | |
533 | ||
/*
 * Allocate and initialise a new lock_state for the given open state
 * and lock owner.  Returns NULL on allocation failure.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
	spin_lock_init(&lsp->ls_sequence.lock);
	INIT_LIST_HEAD(&lsp->ls_sequence.list);
	lsp->ls_seqid.sequence = &lsp->ls_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	/* Lock-owner ids come from a per-client pool */
	spin_lock(&clp->cl_lock);
	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}
559 | ||
/*
 * Free a lock_state: return its lock-owner id to the per-client pool,
 * tear down its seqid wait queue and release the memory.
 */
static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_client *clp = lsp->ls_state->owner->so_client;

	spin_lock(&clp->cl_lock);
	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
	kfree(lsp);
}
570 | ||
/*
 * Return a compatible lock_state for @owner on @state, allocating one
 * if none exists.  The lookup is retried after the (unlocked)
 * allocation to resolve races with concurrent allocators; the loser's
 * allocation is freed.  Returns NULL on allocation failure.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for(;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			/* Second pass: install our allocation */
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		/* Allocation must happen outside the spinlock */
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(new);
	return lsp;
}
603 | ||
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	/* Returns with state_lock held only on the final put */
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	nfs4_free_lock_state(lsp);
}
623 | ||
624 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) | |
625 | { | |
626 | struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner; | |
627 | ||
628 | dst->fl_u.nfs4_fl.owner = lsp; | |
629 | atomic_inc(&lsp->ls_count); | |
630 | } | |
631 | ||
/* file_lock release callback: drop the nfs4 lock owner reference. */
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
636 | ||
/* Operations wired into file_locks owned by NFSv4 */
static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
641 | ||
642 | int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) | |
643 | { | |
644 | struct nfs4_lock_state *lsp; | |
645 | ||
646 | if (fl->fl_ops != NULL) | |
647 | return 0; | |
648 | lsp = nfs4_get_lock_state(state, fl->fl_owner); | |
649 | if (lsp == NULL) | |
650 | return -ENOMEM; | |
651 | fl->fl_u.nfs4_fl.owner = lsp; | |
652 | fl->fl_ops = &nfs4_fl_lock_ops; | |
653 | return 0; | |
654 | } | |
655 | ||
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 *
 * Copies the open stateid under the seqlock, then replaces it with the
 * lock stateid when @fl_owner holds an initialized byte-range lock.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	int seq;

	/* seqlock read loop: retry if a writer updated state->stateid */
	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst, &state->stateid, sizeof(*dst));
	} while (read_seqretry(&state->seqlock, seq));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
679 | ||
680 | struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter) | |
681 | { | |
682 | struct nfs_seqid *new; | |
683 | ||
684 | new = kmalloc(sizeof(*new), GFP_KERNEL); | |
685 | if (new != NULL) { | |
686 | new->sequence = counter; | |
687 | INIT_LIST_HEAD(&new->list); | |
688 | } | |
689 | return new; | |
690 | } | |
691 | ||
/*
 * Free a seqid handle; if it is still queued on its sequence list,
 * dequeue it and wake the next waiter.
 */
void nfs_free_seqid(struct nfs_seqid *seqid)
{
	if (!list_empty(&seqid->list)) {
		struct rpc_sequence *sequence = seqid->sequence->sequence;

		spin_lock(&sequence->lock);
		list_del(&seqid->list);
		spin_unlock(&sequence->lock);
		rpc_wake_up(&sequence->wait);
	}
	kfree(seqid);
}
704 | ||
705 | /* | |
706 | * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or | |
707 | * failed with a seqid incrementing error - | |
708 | * see comments nfs_fs.h:seqid_mutating_error() | |
709 | */ | |
710 | static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | |
711 | { | |
712 | BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid); | |
713 | switch (status) { | |
714 | case 0: | |
715 | break; | |
716 | case -NFS4ERR_BAD_SEQID: | |
717 | if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) | |
718 | return; | |
719 | printk(KERN_WARNING "NFS: v4 server returned a bad" | |
720 | " sequence-id error on an" | |
721 | " unconfirmed sequence %p!\n", | |
722 | seqid->sequence); | |
723 | case -NFS4ERR_STALE_CLIENTID: | |
724 | case -NFS4ERR_STALE_STATEID: | |
725 | case -NFS4ERR_BAD_STATEID: | |
726 | case -NFS4ERR_BADXDR: | |
727 | case -NFS4ERR_RESOURCE: | |
728 | case -NFS4ERR_NOFILEHANDLE: | |
729 | /* Non-seqid mutating errors */ | |
730 | return; | |
731 | }; | |
732 | /* | |
733 | * Note: no locking needed as we are guaranteed to be first | |
734 | * on the sequence list | |
735 | */ | |
736 | seqid->sequence->counter++; | |
737 | } | |
738 | ||
739 | void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) | |
740 | { | |
741 | if (status == -NFS4ERR_BAD_SEQID) { | |
742 | struct nfs4_state_owner *sp = container_of(seqid->sequence, | |
743 | struct nfs4_state_owner, so_seqid); | |
744 | nfs4_drop_state_owner(sp); | |
745 | } | |
746 | nfs_increment_seqid(status, seqid); | |
747 | } | |
748 | ||
/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}
758 | ||
/*
 * Queue @seqid on its sequence list.  Returns 0 when @seqid is at the
 * head and may proceed; otherwise puts @task to sleep on the sequence
 * wait queue and returns -EAGAIN.
 */
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}
775 | ||
776 | static int nfs4_run_state_manager(void *); | |
777 | ||
/* Clear the state-manager-running bit and wake up all waiters. */
static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
	/* Order the clear_bit against surrounding memory operations so
	 * woken waiters observe the bit as clear */
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
	rpc_wake_up(&clp->cl_rpcwaitq);
}
786 | ||
/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
	struct task_struct *task;

	/* Only one state manager thread may run at a time */
	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
		return;
	/* The manager thread pins both this module and the client */
	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	/* Thread creation failed: undo the bit and both references */
	nfs4_clear_state_manager_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}
807 | ||
/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	/* A pending full lease re-establishment supersedes a lease check */
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}
819 | ||
/*
 * Mark @state for reclaim after a server reboot.  Returns 1 if the
 * state was marked, 0 if it had already expired before the reboot
 * and so must not be reclaimed under the grace period.
 */
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{

	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	/* Don't recover state that expired before the reboot */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
		return 0;
	}
	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	return 1;
}
833 | ||
/*
 * Mark @state for reclaim outside the grace period (e.g. after lease
 * expiry), overriding any pending reboot-reclaim mark.  Returns 1.
 */
int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
	return 1;
}
842 | ||
/*
 * Re-establish the byte-range locks recorded against @state using the
 * recovery operations in @ops.  Returns 0, or a negative error if the
 * server rejected the clientid (so recovery must start over).
 */
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct file_lock *fl;
	int status = 0;

	/* Block new lock requests while we replay the existing ones */
	down_write(&nfsi->rwsem);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
				/* Fall through */
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* This lock is lost; continue with the rest */
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	up_write(&nfsi->rwsem);
	return 0;
out_err:
	up_write(&nfsi->rwsem);
	return status;
}
879 | ||
/*
 * Reclaim all open state (and the associated byte-range locks) held
 * by @sp, using the recovery operations in @ops.  The scan restarts
 * from the head after each state is processed, since so_lock must be
 * dropped while talking to the server.  Returns 0, or a negative
 * error if the clientid is stale.
 */
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
restart:
	spin_lock(&sp->so_lock);
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
			continue;
		if (state->state == 0)
			continue;
		/* Pin the state across the unlocked server calls */
		atomic_inc(&state->count);
		spin_unlock(&sp->so_lock);
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(state, ops);
			if (status >= 0) {
				list_for_each_entry(lock, &state->lock_states, ls_locks) {
					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
						printk("%s: Lock reclaim failed!\n",
							__func__);
				}
				nfs4_put_open_state(state);
				goto restart;
			}
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
				/* Fall through */
			case -ENOENT:
			case -ESTALE:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
				/* Fall through */
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
		nfs4_put_open_state(state);
		goto restart;
	}
	spin_unlock(&sp->so_lock);
	return 0;
out_err:
	nfs4_put_open_state(state);
	return status;
}
950 | ||
951 | static void nfs4_clear_open_state(struct nfs4_state *state) | |
952 | { | |
953 | struct nfs4_lock_state *lock; | |
954 | ||
955 | clear_bit(NFS_DELEGATED_STATE, &state->flags); | |
956 | clear_bit(NFS_O_RDONLY_STATE, &state->flags); | |
957 | clear_bit(NFS_O_WRONLY_STATE, &state->flags); | |
958 | clear_bit(NFS_O_RDWR_STATE, &state->flags); | |
959 | list_for_each_entry(lock, &state->lock_states, ls_locks) { | |
960 | lock->ls_seqid.flags = 0; | |
961 | lock->ls_flags &= ~NFS_LOCK_INITIALIZED; | |
962 | } | |
963 | } | |
964 | ||
965 | static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) | |
966 | { | |
967 | struct nfs4_state_owner *sp; | |
968 | struct rb_node *pos; | |
969 | struct nfs4_state *state; | |
970 | ||
971 | /* Reset all sequence ids to zero */ | |
972 | for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { | |
973 | sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); | |
974 | sp->so_seqid.flags = 0; | |
975 | spin_lock(&sp->so_lock); | |
976 | list_for_each_entry(state, &sp->so_states, open_states) { | |
977 | if (mark_reclaim(clp, state)) | |
978 | nfs4_clear_open_state(state); | |
979 | } | |
980 | spin_unlock(&sp->so_lock); | |
981 | } | |
982 | } | |
983 | ||
/*
 * Begin a reboot-style (grace period) recovery pass: flag every
 * delegation, then every open state, for reclaim from the rebooted
 * server.
 */
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
990 | ||
/*
 * Finish a reboot recovery pass.  Any open state that was flagged for
 * reboot reclaim but never recovered is handed over to the "no grace"
 * recovery path, and delegations left unclaimed are reaped.
 */
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	/* Nothing to do unless a reboot reclaim pass was in progress */
	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		return;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
				continue;
			/* Not reclaimed during the grace period: retry without one */
			nfs4_state_mark_reclaim_nograce(clp, state);
		}
		spin_unlock(&sp->so_lock);
	}

	nfs_delegation_reap_unclaimed(clp);
}
1013 | ||
/*
 * Discard every delegation held from this server: mark them all for
 * reclaim, then immediately reap them as unclaimed.
 */
static void nfs_delegation_clear_all(struct nfs_client *clp)
{
	nfs_delegation_mark_reclaim(clp);
	nfs_delegation_reap_unclaimed(clp);
}
1019 | ||
/*
 * Begin a "no grace period" recovery pass: throw away all delegations
 * and flag every open state for unconditional re-establishment.
 */
static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
	nfs_delegation_clear_all(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}
1025 | ||
/* Finish a "no grace period" recovery pass by clearing its work bit. */
static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
{
	clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
}
1030 | ||
/*
 * Translate an error seen during state recovery into the set of
 * follow-up recovery actions, expressed as bits in clp->cl_state
 * (picked up later by the state manager loop).
 */
static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
	switch (error) {
		case -NFS4ERR_CB_PATH_DOWN:
			nfs_handle_cb_pathdown(clp);
			break;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_start_reclaim_reboot(clp);
			break;
		case -NFS4ERR_EXPIRED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_start_reclaim_nograce(clp);
			/* NOTE(review): no break here, so -NFS4ERR_EXPIRED also
			 * falls through and sets NFS4CLNT_SESSION_SETUP below --
			 * confirm this is intentional rather than a missing break. */
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			/* NFSv4.1 session-level errors: schedule a session rebuild */
			set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
	}
}
1055 | ||
/*
 * Walk every state owner flagged for recovery (ops->owner_flag_bit) and
 * reclaim its open state via @ops.  Each owner is pinned and
 * clp->cl_lock dropped for the duration of the reclaim, so the rbtree
 * walk restarts from scratch after every owner processed.
 *
 * Returns 0 once no flagged owners remain, or the first fatal error
 * (after re-flagging that owner so the work is not lost).
 */
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
	struct rb_node *pos;
	int status = 0;

restart:
	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
			continue;
		/* Pin the owner across the unlocked reclaim call */
		atomic_inc(&sp->so_count);
		spin_unlock(&clp->cl_lock);
		status = nfs4_reclaim_open_state(sp, ops);
		if (status < 0) {
			/* Re-flag so a later pass retries this owner */
			set_bit(ops->owner_flag_bit, &sp->so_flags);
			nfs4_put_state_owner(sp);
			nfs4_recovery_handle_error(clp, status);
			return status;
		}
		nfs4_put_state_owner(sp);
		/* Tree may have changed while unlocked: rescan */
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
	return status;
}
1082 | ||
1083 | static int nfs4_check_lease(struct nfs_client *clp) | |
1084 | { | |
1085 | struct rpc_cred *cred; | |
1086 | int status = -NFS4ERR_EXPIRED; | |
1087 | ||
1088 | /* Is the client already known to have an expired lease? */ | |
1089 | if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) | |
1090 | return 0; | |
1091 | cred = nfs4_get_renew_cred(clp); | |
1092 | if (cred == NULL) { | |
1093 | cred = nfs4_get_setclientid_cred(clp); | |
1094 | if (cred == NULL) | |
1095 | goto out; | |
1096 | } | |
1097 | status = nfs4_proc_renew(clp, cred); | |
1098 | put_rpccred(cred); | |
1099 | out: | |
1100 | nfs4_recovery_handle_error(clp, status); | |
1101 | return status; | |
1102 | } | |
1103 | ||
1104 | static int nfs4_reclaim_lease(struct nfs_client *clp) | |
1105 | { | |
1106 | struct rpc_cred *cred; | |
1107 | int status = -ENOENT; | |
1108 | ||
1109 | cred = nfs4_get_setclientid_cred(clp); | |
1110 | if (cred != NULL) { | |
1111 | status = nfs4_init_client(clp, cred); | |
1112 | put_rpccred(cred); | |
1113 | /* Handle case where the user hasn't set up machine creds */ | |
1114 | if (status == -EACCES && cred == clp->cl_machine_cred) { | |
1115 | nfs4_clear_machine_cred(clp); | |
1116 | status = -EAGAIN; | |
1117 | } | |
1118 | if (status == -NFS4ERR_MINOR_VERS_MISMATCH) | |
1119 | status = -EPROTONOSUPPORT; | |
1120 | } | |
1121 | return status; | |
1122 | } | |
1123 | ||
1124 | #ifdef CONFIG_NFS_V4_1 | |
1125 | static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err) | |
1126 | { | |
1127 | switch (err) { | |
1128 | case -NFS4ERR_STALE_CLIENTID: | |
1129 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); | |
1130 | set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); | |
1131 | } | |
1132 | } | |
1133 | ||
/*
 * Tear down the existing NFSv4.1 session and create a fresh one.
 * BADSESSION/DEADSESSION errors from DESTROY_SESSION are tolerated,
 * since they just mean the session is already gone on the server.
 */
static int nfs4_reset_session(struct nfs_client *clp)
{
	int status;

	status = nfs4_proc_destroy_session(clp->cl_session);
	if (status && status != -NFS4ERR_BADSESSION &&
	    status != -NFS4ERR_DEADSESSION) {
		nfs4_session_recovery_handle_error(clp, status);
		goto out;
	}

	/* Zero the session id so CREATE_SESSION obtains a brand new one */
	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
	status = nfs4_proc_create_session(clp, 1);
	if (status)
		nfs4_session_recovery_handle_error(clp, status);
	/* fall into the common wakeup below */
out:
	/* Wake up the next rpc task even on error */
	rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
	return status;
}
1155 | ||
1156 | static int nfs4_initialize_session(struct nfs_client *clp) | |
1157 | { | |
1158 | int status; | |
1159 | ||
1160 | status = nfs4_proc_create_session(clp, 0); | |
1161 | if (!status) { | |
1162 | nfs_mark_client_ready(clp, NFS_CS_READY); | |
1163 | } else if (status == -NFS4ERR_STALE_CLIENTID) { | |
1164 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); | |
1165 | set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state); | |
1166 | } else { | |
1167 | nfs_mark_client_ready(clp, status); | |
1168 | } | |
1169 | return status; | |
1170 | } | |
1171 | #else /* CONFIG_NFS_V4_1 */ | |
/* Sessions exist only in NFSv4.1; without it these are harmless no-ops. */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
1174 | #endif /* CONFIG_NFS_V4_1 */ | |
1175 | ||
/*
 * The NFSv4 state manager main loop.  Runs with exclusive ownership of
 * state recovery (guarded by NFS4CLNT_MANAGER_RUNNING), repeatedly
 * testing and clearing cl_state work bits until none remain.  The
 * recovery steps are strictly ordered: lease re-establishment, lease
 * check, session (re)setup, reboot reclaim, no-grace reclaim, then
 * delegation returns.  Most steps `continue` after acting, so earlier
 * (higher-priority) work raised in the meantime is noticed first.
 */
static void nfs4_state_manager(struct nfs_client *clp)
{
	int status = 0;

	/* Ensure exclusive access to NFSv4 state */
	for(;;) {
		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
			/* We're going to have to re-establish a clientid */
			status = nfs4_reclaim_lease(clp);
			if (status) {
				/* Re-flag so the work is not lost */
				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
				if (status == -EAGAIN)
					continue;
				if (clp->cl_cons_state ==
						NFS_CS_SESSION_INITING)
					nfs_mark_client_ready(clp, status);
				goto out_error;
			}
			/* A fresh clientid makes a separate lease check moot */
			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
		}

		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
			status = nfs4_check_lease(clp);
			if (status != 0)
				continue;
		}
		/* Initialize or reset the session */
		if (nfs4_has_session(clp) &&
		   test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
			if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
				status = nfs4_initialize_session(clp);
			else
				status = nfs4_reset_session(clp);
			if (status) {
				if (status == -NFS4ERR_STALE_CLIENTID)
					continue;
				goto out_error;
			}
		}
		/* First recover reboot state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
			if (status == -NFS4ERR_STALE_CLIENTID)
				continue;
			if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
				continue;
			nfs4_state_end_reclaim_reboot(clp);
			continue;
		}

		/* Now recover expired state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
			if (status < 0) {
				/* Re-flag so a later pass retries the reclaim */
				set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
				if (status == -NFS4ERR_STALE_CLIENTID)
					continue;
				if (status == -NFS4ERR_EXPIRED)
					continue;
				if (test_bit(NFS4CLNT_SESSION_SETUP,
						&clp->cl_state))
					continue;
				goto out_error;
			} else
				nfs4_state_end_reclaim_nograce(clp);
			continue;
		}

		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
			nfs_client_return_marked_delegations(clp);
			continue;
		}

		nfs4_clear_state_manager_bit(clp);
		/* Did we race with an attempt to give us more work? */
		if (clp->cl_state == 0)
			break;
		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
			break;
	}
	return;
out_error:
	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	/* Don't leave an aborted reboot reclaim pass dangling */
	if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		nfs4_state_end_reclaim_reboot(clp);
	nfs4_clear_state_manager_bit(clp);
}
1264 | ||
/*
 * Kthread entry point for the state manager.  Allows SIGKILL so the
 * thread can be interrupted, runs the recovery loop, then drops the
 * client reference and the module reference taken when it was spawned.
 */
static int nfs4_run_state_manager(void *ptr)
{
	struct nfs_client *clp = ptr;

	allow_signal(SIGKILL);
	nfs4_state_manager(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
}
1275 | ||
1276 | /* | |
1277 | * Local variables: | |
1278 | * c-basic-offset: 8 | |
1279 | * End: | |
1280 | */ |