/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#define OPENOWNER_POOL_SIZE 8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_setclientid_res clid;
	unsigned short port;
	int status;

	port = nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	nfs4_schedule_state_renewal(clp);
out:
	return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
	struct rpc_cred *cred = NULL;

	if (clp->cl_machine_cred != NULL)
		cred = get_rpccred(clp->cl_machine_cred);
	return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = clp->cl_machine_cred;
	clp->cl_machine_cred = NULL;
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		put_rpccred(cred);
}

struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred = NULL;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

#if defined(CONFIG_NFS_V4_1)

static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
	int status;
	struct nfs_fsinfo fsinfo;

	status = nfs4_proc_get_lease_time(clp, &fsinfo);
	if (status == 0) {
		/* Update lease time and schedule renewal */
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = jiffies;
		spin_unlock(&clp->cl_lock);

		nfs4_schedule_state_renewal(clp);
	}

	return status;
}

static void nfs4_end_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	int max_slots;

	if (ses == NULL)
		return;
	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
		max_slots = ses->fc_slot_table.max_slots;
		while (max_slots--) {
			struct rpc_task *task;

			task = rpc_wake_up_next(&ses->fc_slot_table.
						slot_tbl_waitq);
			if (!task)
				break;
			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
		}
		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
	}
}

static int nfs4_begin_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	struct nfs4_slot_table *tbl = &ses->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
	if (tbl->highest_used_slotid != -1) {
		INIT_COMPLETION(ses->complete);
		spin_unlock(&tbl->slot_tbl_lock);
		return wait_for_completion_interruptible(&ses->complete);
	}
	spin_unlock(&tbl->slot_tbl_lock);
	return 0;
}

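/*
 * How the drain helpers above are meant to be paired, as a minimal sketch
 * (not an extra code path; the real callers are nfs41_init_clientid(),
 * nfs4_reset_session(), nfs4_recall_slot() and the state manager loop
 * further down in this file):
 *
 *	nfs4_begin_drain_session(clp);	-- stop handing out slots, wait for idle
 *	... destroy and/or re-create the session ...
 *	nfs4_end_drain_session(clp);	-- wake queued tasks at privileged priority
 */
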
int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status;

	nfs4_begin_drain_session(clp);
	status = nfs4_proc_exchange_id(clp, cred);
	if (status != 0)
		goto out;
	status = nfs4_proc_create_session(clp);
	if (status != 0)
		goto out;
	status = nfs4_set_callback_sessionid(clp);
	if (status != 0) {
		printk(KERN_WARNING "Sessionid not set. No callback service\n");
		nfs_callback_down(1);
		status = 0;
	}
	nfs41_setup_state_renewal(clp);
	nfs_mark_client_ready(clp, NFS_CS_READY);
out:
	return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	return cred;
}

#endif /* CONFIG_NFS_V4_1 */

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	if (cred != NULL)
		goto out;
	pos = rb_first(&clp->cl_state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		cred = get_rpccred(sp->so_cred);
	}
out:
	spin_unlock(&clp->cl_lock);
	return cred;
}

static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
		__u64 minval, int maxbits)
{
	struct rb_node **p, *parent;
	struct nfs_unique_id *pos;
	__u64 mask = ~0ULL;

	if (maxbits < 64)
		mask = (1ULL << maxbits) - 1ULL;

	/* Ensure distribution is more or less flat */
	get_random_bytes(&new->id, sizeof(new->id));
	new->id &= mask;
	if (new->id < minval)
		new->id += minval;
retry:
	p = &root->rb_node;
	parent = NULL;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);

		if (new->id < pos->id)
			p = &(*p)->rb_left;
		else if (new->id > pos->id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return;
id_exists:
	for (;;) {
		new->id++;
		if (new->id < minval || (new->id & mask) != new->id) {
			new->id = minval;
			break;
		}
		parent = rb_next(parent);
		if (parent == NULL)
			break;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
		if (new->id < pos->id)
			break;
	}
	goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
	rb_erase(&id->rb_node, root);
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp, *res = NULL;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			res = sp;
			break;
		}
	}
	return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (new->so_server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (new->so_server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
	rb_link_node(&new->so_client_node, parent, p);
	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
	return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node))
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 *
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
		struct nfs_client *clp = sp->so_server->nfs_client;

		spin_lock(&clp->cl_lock);
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
		RB_CLEAR_NODE(&sp->so_client_node);
		spin_unlock(&clp->cl_lock);
	}
}

struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		return sp;
	new = nfs4_alloc_state_owner();
	if (new == NULL)
		return NULL;
	new->so_server = server;
	new->so_cred = cred;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner(clp, new);
	spin_unlock(&clp->cl_lock);
	if (sp == new)
		get_rpccred(cred);
	else {
		rpc_destroy_wait_queue(&new->so_sequence.wait);
		kfree(new);
	}
	return sp;
}

void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_server->nfs_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	nfs4_remove_state_owner(clp, sp);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&sp->so_sequence.wait);
	put_rpccred(cred);
	kfree(sp);
}

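/*
 * Typical lifetime of a state owner, shown as a sketch only. The real
 * call sites are in the open path (assumed to live in nfs4proc.c; they
 * are not part of this file), and nfs4_get_open_state() is defined below:
 *
 *	sp = nfs4_get_state_owner(server, cred);	-- find or create, takes a ref
 *	state = nfs4_get_open_state(inode, sp);		-- per (inode, owner) open state
 *	...
 *	nfs4_put_open_state(state);
 *	nfs4_put_state_owner(sp);			-- freed on the last put
 */
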
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (atomic_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state,
		fmode_t fmode, gfp_t gfp_mask, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(path, state, gfp_mask, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, GFP_NOFS, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, GFP_KERNEL, 1);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
			continue;
		switch (pos->ls_owner.lo_type) {
		case NFS4_POSIX_LOCK_TYPE:
			if (pos->ls_owner.lo_u.posix_owner != fl_owner)
				continue;
			break;
		case NFS4_FLOCK_LOCK_TYPE:
			if (pos->ls_owner.lo_u.flock_owner != fl_pid)
				continue;
		}
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_server->nfs_client;

	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
	if (lsp == NULL)
		return NULL;
	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
	spin_lock_init(&lsp->ls_sequence.lock);
	INIT_LIST_HEAD(&lsp->ls_sequence.list);
	lsp->ls_seqid.sequence = &lsp->ls_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_state = state;
	lsp->ls_owner.lo_type = type;
	switch (lsp->ls_owner.lo_type) {
	case NFS4_FLOCK_LOCK_TYPE:
		lsp->ls_owner.lo_u.flock_owner = fl_pid;
		break;
	case NFS4_POSIX_LOCK_TYPE:
		lsp->ls_owner.lo_u.posix_owner = fl_owner;
		break;
	default:
		kfree(lsp);
		return NULL;
	}
	spin_lock(&clp->cl_lock);
	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;

	spin_lock(&clp->cl_lock);
	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
	kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for(;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner, pid, type);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner, pid, type);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
		nfs4_release_lockowner(lsp);
	nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	if (fl->fl_flags & FL_POSIX)
		lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
	else if (fl->fl_flags & FL_FLOCK)
		lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
	else
		return -EINVAL;
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
{
	struct nfs4_lock_state *lsp;
	int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst, &state->stateid, sizeof(*dst));
	} while (read_seqretry(&state->seqlock, seq));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

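/*
 * How the two halves above fit together, as a sketch. The actual lock
 * setup and READ/WRITE encoding happen outside this file (assumed to be
 * in nfs4proc.c), so the call sites shown are illustrative only:
 *
 *	nfs4_set_lock_state(state, fl);		-- attach an nfs4_lock_state to the VFS lock
 *	...
 *	nfs4_copy_stateid(&stateid, state, fl_owner, fl_pid);
 *		-- picks the lock stateid when one is initialized,
 *		   otherwise falls back to the open stateid
 */
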
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), gfp_mask);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}

void nfs_release_seqid(struct nfs_seqid *seqid)
{
	if (!list_empty(&seqid->list)) {
		struct rpc_sequence *sequence = seqid->sequence->sequence;

		spin_lock(&sequence->lock);
		list_del_init(&seqid->list);
		spin_unlock(&sequence->lock);
		rpc_wake_up(&sequence->wait);
	}
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	nfs_release_seqid(seqid);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
				return;
			printk(KERN_WARNING "NFS: v4 server returned a bad"
					" sequence-id error on an"
					" unconfirmed sequence %p!\n",
					seqid->sequence);
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	};
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	struct nfs4_state_owner *sp = container_of(seqid->sequence,
					struct nfs4_state_owner, so_seqid);
	struct nfs_server *server = sp->so_server;

	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_drop_state_owner(sp);
	if (!nfs4_has_session(server->nfs_client))
		nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}

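/*
 * Sketch of the seqid lifecycle the helpers above expect. The real users
 * are the OPEN/CLOSE/LOCK RPC setup and completion callbacks, which live
 * outside this file, so the placement shown is an assumption:
 *
 *	seqid = nfs_alloc_seqid(&sp->so_seqid, GFP_KERNEL);
 *	... in ->rpc_call_prepare: nfs_wait_on_sequence(seqid, task);
 *	... in ->rpc_call_done:    nfs_increment_open_seqid(status, seqid);
 *	nfs_free_seqid(seqid);		-- also wakes the next waiter
 */
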
static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
	struct task_struct *task;

	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
		return;
	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	nfs4_clear_state_manager_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}

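/*
 * Recovery work is always requested the same way: set one of the
 * NFS4CLNT_* bits in clp->cl_state and kick the manager thread. A minimal
 * sketch of the pattern (nfs4_schedule_state_recovery() above is the
 * common wrapper for it):
 *
 *	set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
 *	nfs4_schedule_state_manager(clp);
 */
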
int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	/* Don't recover state that expired before the reboot */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
		return 0;
	}
	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	return 1;
}

int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
	return 1;
}

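/*
 * Sketch of how a bad stateid on a single open is funneled into recovery.
 * nfs4_reclaim_open_state() below does the first step itself for errors
 * hit during reclaim; callers outside this file are assumed to pair it
 * with a recovery kick:
 *
 *	nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
 *	nfs4_schedule_state_recovery(server->nfs_client);
 */
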
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct file_lock *fl;
	int status = 0;

	if (inode->i_flock == NULL)
		return 0;

	/* Guard against delegation returns and new lock/unlock calls */
	down_write(&nfsi->rwsem);
	/* Protect inode->i_flock using the BKL */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		unlock_flocks();
		status = ops->recover_lock(state, fl);
		switch (status) {
			case 0:
				break;
			case -ESTALE:
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_BAD_STATEID:
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
				goto out;
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
			case -ENOMEM:
			case -NFS4ERR_DENIED:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				status = 0;
		}
		lock_flocks();
	}
	unlock_flocks();
out:
	up_write(&nfsi->rwsem);
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
restart:
	spin_lock(&sp->so_lock);
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
			continue;
		if (state->state == 0)
			continue;
		atomic_inc(&state->count);
		spin_unlock(&sp->so_lock);
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(state, ops);
			if (status >= 0) {
				list_for_each_entry(lock, &state->lock_states, ls_locks) {
					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
						printk("%s: Lock reclaim failed!\n",
							__func__);
				}
				nfs4_put_open_state(state);
				goto restart;
			}
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
			case -ENOENT:
			case -ENOMEM:
			case -ESTALE:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -EKEYEXPIRED:
				/*
				 * User RPCSEC_GSS context has expired.
				 * We cannot recover this stateid now, so
				 * skip it and allow recovery thread to
				 * proceed.
				 */
				break;
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_BAD_STATEID:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
				nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
				goto out_err;
		}
		nfs4_put_open_state(state);
		goto restart;
	}
	spin_unlock(&sp->so_lock);
	return 0;
out_err:
	nfs4_put_open_state(state);
	return status;
}

static void nfs4_clear_open_state(struct nfs4_state *state)
{
	struct nfs4_lock_state *lock;

	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	list_for_each_entry(lock, &state->lock_states, ls_locks) {
		lock->ls_seqid.flags = 0;
		lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
	}
}

static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	/* Reset all sequence ids to zero */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (mark_reclaim(clp, state))
				nfs4_clear_open_state(state);
		}
		spin_unlock(&sp->so_lock);
	}
}

static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

static void nfs4_reclaim_complete(struct nfs_client *clp,
				 const struct nfs4_state_recovery_ops *ops)
{
	/* Notify the server we're done reclaiming our state */
	if (ops->reclaim_complete)
		(void)ops->reclaim_complete(clp);
}

static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		return 0;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
				continue;
			nfs4_state_mark_reclaim_nograce(clp, state);
		}
		spin_unlock(&sp->so_lock);
	}

	nfs_delegation_reap_unclaimed(clp);
	return 1;
}

static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
	if (!nfs4_state_clear_reclaim_reboot(clp))
		return;
	nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
{
	nfs_delegation_mark_reclaim(clp);
	nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
	nfs_delegation_clear_all(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

static void nfs4_warn_keyexpired(const char *s)
{
	printk_ratelimited(KERN_WARNING "Error: state manager"
				" encountered RPCSEC_GSS session"
				" expired against NFSv4 server %s.\n",
				s);
}

static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
	switch (error) {
		case -NFS4ERR_CB_PATH_DOWN:
			nfs_handle_cb_pathdown(clp);
			return 0;
		case -NFS4ERR_NO_GRACE:
			nfs4_state_end_reclaim_reboot(clp);
			return 0;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_clear_reclaim_reboot(clp);
			nfs4_state_start_reclaim_reboot(clp);
			break;
		case -NFS4ERR_EXPIRED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_start_reclaim_nograce(clp);
			break;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
			/* Zero session reset errors */
			return 0;
		case -EKEYEXPIRED:
			/* Nothing we can do */
			nfs4_warn_keyexpired(clp->cl_hostname);
			return 0;
	}
	return error;
}

static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
	struct rb_node *pos;
	int status = 0;

restart:
	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
			continue;
		atomic_inc(&sp->so_count);
		spin_unlock(&clp->cl_lock);
		status = nfs4_reclaim_open_state(sp, ops);
		if (status < 0) {
			set_bit(ops->owner_flag_bit, &sp->so_flags);
			nfs4_put_state_owner(sp);
			return nfs4_recovery_handle_error(clp, status);
		}
		nfs4_put_state_owner(sp);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
	return status;
}

static int nfs4_check_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	const struct nfs4_state_maintenance_ops *ops =
		clp->cl_mvops->state_renewal_ops;
	int status = -NFS4ERR_EXPIRED;

	/* Is the client already known to have an expired lease? */
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		return 0;
	spin_lock(&clp->cl_lock);
	cred = ops->get_state_renewal_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	if (cred == NULL) {
		cred = nfs4_get_setclientid_cred(clp);
		if (cred == NULL)
			goto out;
	}
	status = ops->renew_lease(clp, cred);
	put_rpccred(cred);
out:
	return nfs4_recovery_handle_error(clp, status);
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	const struct nfs4_state_recovery_ops *ops =
		clp->cl_mvops->reboot_recovery_ops;
	int status = -ENOENT;

	cred = ops->get_clid_cred(clp);
	if (cred != NULL) {
		status = ops->establish_clid(clp, cred);
		put_rpccred(cred);
		/* Handle case where the user hasn't set up machine creds */
		if (status == -EACCES && cred == clp->cl_machine_cred) {
			nfs4_clear_machine_cred(clp);
			status = -EAGAIN;
		}
		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
			status = -EPROTONOSUPPORT;
	}
	return status;
}

#ifdef CONFIG_NFS_V4_1
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
	set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
	nfs4_schedule_state_recovery(clp);
}

static void nfs4_reset_all_state(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		clp->cl_boot_time = CURRENT_TIME;
		nfs4_state_start_reclaim_nograce(clp);
		nfs4_schedule_state_recovery(clp);
	}
}

static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
		nfs4_state_start_reclaim_reboot(clp);
		nfs4_schedule_state_recovery(clp);
	}
}

static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
	/* Temporary */
	nfs4_reset_all_state(clp);
}

static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
	/* This will need to handle layouts too */
	nfs_expire_all_delegations(clp);
}

static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
	nfs_expire_all_delegations(clp);
	if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
		nfs4_schedule_state_recovery(clp);
}

void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
	if (!flags)
		return;
	else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
		nfs41_handle_server_reboot(clp);
	else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
			    SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
			    SEQ4_STATUS_ADMIN_STATE_REVOKED |
			    SEQ4_STATUS_LEASE_MOVED))
		nfs41_handle_state_revoked(clp);
	else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
		nfs41_handle_recallable_state_revoked(clp);
	else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
			    SEQ4_STATUS_BACKCHANNEL_FAULT |
			    SEQ4_STATUS_CB_PATH_DOWN_SESSION))
		nfs41_handle_cb_path_down(clp);
}

static int nfs4_reset_session(struct nfs_client *clp)
{
	int status;

	nfs4_begin_drain_session(clp);
	status = nfs4_proc_destroy_session(clp->cl_session);
	if (status && status != -NFS4ERR_BADSESSION &&
	    status != -NFS4ERR_DEADSESSION) {
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}

	memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
	status = nfs4_proc_create_session(clp);
	if (status) {
		status = nfs4_recovery_handle_error(clp, status);
		goto out;
	}
	/* create_session negotiated new slot table */
	clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);

	/* Let the state manager reestablish state */
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		nfs41_setup_state_renewal(clp);
out:
	return status;
}

static int nfs4_recall_slot(struct nfs_client *clp)
{
	struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
	struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
	struct nfs4_slot *new, *old;
	int i;

	nfs4_begin_drain_session(clp);
	new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
		      GFP_NOFS);
	if (!new)
		return -ENOMEM;

	spin_lock(&fc_tbl->slot_tbl_lock);
	for (i = 0; i < fc_tbl->target_max_slots; i++)
		new[i].seq_nr = fc_tbl->slots[i].seq_nr;
	old = fc_tbl->slots;
	fc_tbl->slots = new;
	fc_tbl->max_slots = fc_tbl->target_max_slots;
	fc_tbl->target_max_slots = 0;
	fc_attrs->max_reqs = fc_tbl->max_slots;
	spin_unlock(&fc_tbl->slot_tbl_lock);

	kfree(old);
	nfs4_end_drain_session(clp);
	return 0;
}

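/*
 * Expected flow for CB_RECALL_SLOT, as a sketch. The backchannel handler
 * that stores the new target lives outside this file, so that first step
 * is an assumption:
 *
 *	fc_tbl->target_max_slots = <target taken from CB_RECALL_SLOT>;
 *	nfs41_handle_recall_slot(clp);	-- sets NFS4CLNT_RECALL_SLOT
 *	... the state manager then calls nfs4_recall_slot() to resize the table.
 */
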
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */

/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
 * on EXCHANGE_ID for v4.1
 */
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
	if (nfs4_has_session(clp)) {
		switch (status) {
		case -NFS4ERR_DELAY:
		case -NFS4ERR_CLID_INUSE:
		case -EAGAIN:
			break;

		case -EKEYEXPIRED:
			nfs4_warn_keyexpired(clp->cl_hostname);
		case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
					 * in nfs4_exchange_id */
		default:
			return;
		}
	}
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}

static void nfs4_state_manager(struct nfs_client *clp)
{
	int status = 0;

	/* Ensure exclusive access to NFSv4 state */
	for(;;) {
		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
			/* We're going to have to re-establish a clientid */
			status = nfs4_reclaim_lease(clp);
			if (status) {
				nfs4_set_lease_expired(clp, status);
				if (test_bit(NFS4CLNT_LEASE_EXPIRED,
							&clp->cl_state))
					continue;
				if (clp->cl_cons_state ==
							NFS_CS_SESSION_INITING)
					nfs_mark_client_ready(clp, status);
				goto out_error;
			}
			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
			set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
			pnfs_destroy_all_layouts(clp);
		}

		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
			status = nfs4_check_lease(clp);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
			if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
				goto out_error;
		}

		/* Initialize or reset the session */
		if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
		   && nfs4_has_session(clp)) {
			status = nfs4_reset_session(clp);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* First recover reboot state... */
		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->reboot_recovery_ops);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
				continue;
			nfs4_state_end_reclaim_reboot(clp);
			if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		/* Now recover expired state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp,
				clp->cl_mvops->nograce_recovery_ops);
			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
				continue;
			if (status < 0)
				goto out_error;
		}

		nfs4_end_drain_session(clp);
		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
			nfs_client_return_marked_delegations(clp);
			continue;
		}
		/* Recall session slots */
		if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
		   && nfs4_has_session(clp)) {
			status = nfs4_recall_slot(clp);
			if (status < 0)
				goto out_error;
			continue;
		}

		nfs4_clear_state_manager_bit(clp);
		/* Did we race with an attempt to give us more work? */
		if (clp->cl_state == 0)
			break;
		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
			break;
	}
	return;
out_error:
	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	nfs4_end_drain_session(clp);
	nfs4_clear_state_manager_bit(clp);
}

static int nfs4_run_state_manager(void *ptr)
{
	struct nfs_client *clp = ptr;

	allow_signal(SIGKILL);
	nfs4_state_manager(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */