]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/clntlock.c | |
3 | * | |
4 | * Lock handling for the client side NLM implementation | |
5 | * | |
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/types.h> | |
11 | #include <linux/time.h> | |
12 | #include <linux/nfs_fs.h> | |
13 | #include <linux/sunrpc/clnt.h> | |
14 | #include <linux/sunrpc/svc.h> | |
15 | #include <linux/lockd/lockd.h> | |
16 | #include <linux/smp_lock.h> | |
17 | ||
/* Debug facility tag used by dprintk() in this file. */
#define NLMDBG_FACILITY		NLMDBG_CLIENT

/*
 * Local function prototypes
 */
/* Kernel-thread entry point spawned by nlmclnt_recovery(). */
static int			reclaimer(void *ptr);
24 | ||
25 | /* | |
26 | * The following functions handle blocking and granting from the | |
27 | * client perspective. | |
28 | */ | |
29 | ||
/*
 * This is the representation of a blocked client lock.
 * One of these lives on the caller's stack for the duration of
 * nlmclnt_block() and is linked into the global nlm_blocked list,
 * where the GRANTED callback (nlmclnt_grant) can find and wake it.
 */
struct nlm_wait {
	struct nlm_wait *	b_next;		/* linked list */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;		/* server we are blocked on */
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status */
};

/* Head of the singly-linked list of blocked lock waiters. */
static struct nlm_wait *	nlm_blocked;
43 | ||
/*
 * Block on a lock
 *
 * Queue an on-stack nlm_wait entry on the global nlm_blocked list, then
 * sleep until either the server's GRANTED callback wakes us (in which
 * case nlmclnt_grant() has updated b_status) or the 30 second poll
 * timeout expires.
 *
 * Returns 0 with *statp set to the callback status, or -ERESTARTSYS if
 * the sleep was interrupted by a signal; in the latter case a CANCEL
 * request is sent to the server unless its NSM state changed (i.e. it
 * rebooted), which already voids the pending request.
 *
 * NOTE(review): nlm_blocked is spliced/unspliced here with no explicit
 * lock; presumably callers are serialized via the BKL -- confirm.
 * NOTE(review): sleep_on_timeout() is inherently racy (a wake_up between
 * queueing the entry and going to sleep can be lost); the 30s re-poll
 * mentioned below appears to be the mitigation -- confirm.
 */
int
nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
{
	struct nlm_wait	block, **head;
	int		err;
	u32		pstate;

	block.b_host = host;
	block.b_lock = fl;
	init_waitqueue_head(&block.b_wait);
	block.b_status = NLM_LCK_BLOCKED;
	block.b_next = nlm_blocked;
	nlm_blocked = &block;

	/* Remember pseudo nsm state: if it changes while we sleep, the
	 * server rebooted and our request died with it. */
	pstate = host->h_state;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	sleep_on_timeout(&block.b_wait, 30*HZ);

	/* Unlink our on-stack entry from the blocked list before the
	 * stack frame goes away. */
	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
		if (*head == &block) {
			*head = block.b_next;
			break;
		}
	}

	if (!signalled()) {
		*statp = block.b_status;
		return 0;
	}

	/* Okay, we were interrupted. Cancel the pending request
	 * unless the server has rebooted.
	 */
	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
		printk(KERN_NOTICE
			"lockd: CANCEL call failed (errno %d)\n", -err);

	return -ERESTARTSYS;
}
95 | ||
96 | /* | |
97 | * The server lockd has called us back to tell us the lock was granted | |
98 | */ | |
99 | u32 | |
100 | nlmclnt_grant(struct nlm_lock *lock) | |
101 | { | |
102 | struct nlm_wait *block; | |
103 | ||
104 | /* | |
105 | * Look up blocked request based on arguments. | |
106 | * Warning: must not use cookie to match it! | |
107 | */ | |
108 | for (block = nlm_blocked; block; block = block->b_next) { | |
109 | if (nlm_compare_locks(block->b_lock, &lock->fl)) | |
110 | break; | |
111 | } | |
112 | ||
113 | /* Ooops, no blocked request found. */ | |
114 | if (block == NULL) | |
115 | return nlm_lck_denied; | |
116 | ||
117 | /* Alright, we found the lock. Set the return status and | |
118 | * wake up the caller. | |
119 | */ | |
120 | block->b_status = NLM_LCK_GRANTED; | |
121 | wake_up(&block->b_wait); | |
122 | ||
123 | return nlm_granted; | |
124 | } | |
125 | ||
126 | /* | |
127 | * The following procedures deal with the recovery of locks after a | |
128 | * server crash. | |
129 | */ | |
130 | ||
131 | /* | |
132 | * Mark the locks for reclaiming. | |
133 | * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. | |
134 | * Maintain NLM lock reclaiming lists in the nlm_host instead. | |
135 | */ | |
136 | static | |
137 | void nlmclnt_mark_reclaim(struct nlm_host *host) | |
138 | { | |
139 | struct file_lock *fl; | |
140 | struct inode *inode; | |
141 | struct list_head *tmp; | |
142 | ||
143 | list_for_each(tmp, &file_lock_list) { | |
144 | fl = list_entry(tmp, struct file_lock, fl_link); | |
145 | ||
146 | inode = fl->fl_file->f_dentry->d_inode; | |
147 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | |
148 | continue; | |
149 | if (fl->fl_u.nfs_fl.owner->host != host) | |
150 | continue; | |
151 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) | |
152 | continue; | |
153 | fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; | |
154 | } | |
155 | } | |
156 | ||
157 | /* | |
158 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, | |
159 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. | |
160 | */ | |
161 | static inline | |
162 | void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) | |
163 | { | |
164 | host->h_monitored = 0; | |
165 | host->h_nsmstate = newstate; | |
166 | host->h_state++; | |
167 | host->h_nextrebind = 0; | |
168 | nlm_rebind_host(host); | |
169 | nlmclnt_mark_reclaim(host); | |
170 | dprintk("NLM: reclaiming locks for host %s", host->h_name); | |
171 | } | |
172 | ||
/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 *
 * h_reclaiming doubles as a test-and-set flag: the first caller prepares
 * the host and spawns reclaimer(), pinning the host (nlm_get_host) and
 * this module (__module_get) so neither goes away mid-reclaim; later
 * callers only redo the preparation if the server's NSM state changed
 * yet again (i.e. it rebooted a second time).
 *
 * NOTE(review): h_reclaiming++ is not an atomic test-and-set; presumably
 * callers are serialized (BKL) -- confirm.
 * NOTE(review): if kernel_thread() fails, the module ref is dropped but
 * the nlm_get_host() reference and the raised h_reclaiming count appear
 * to leak -- confirm.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_reclaiming++) {
		/* A reclaimer thread is already running; nothing to do
		 * unless the server rebooted again. */
		if (host->h_nsmstate == newstate)
			return;
		nlmclnt_prepare_reclaim(host, newstate);
	} else {
		nlmclnt_prepare_reclaim(host, newstate);
		nlm_get_host(host);		/* released by reclaimer() */
		__module_get(THIS_MODULE);	/* dropped by module_put_and_exit() */
		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
			module_put(THIS_MODULE);	/* thread never started */
	}
}
192 | ||
/*
 * Reclaimer kernel thread: re-acquires every lock that
 * nlmclnt_mark_reclaim() tagged for this host after a server reboot,
 * then wakes all waiters blocked against the host so they retry.
 * Always exits via module_put_and_exit(), dropping the module reference
 * taken by nlmclnt_recovery().
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		/* Clear the flag BEFORE reclaiming so a rescan skips
		 * this lock. */
		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		if (signalled())
			break;
		/* nlmclnt_reclaim() may have slept, so file_lock_list may
		 * have changed under us: restart the scan from the head.
		 * Already-reclaimed locks are skipped via the cleared
		 * NFS_LCK_RECLAIM bit, so this terminates. */
		goto restart;
	}

	host->h_reclaiming = 0;

	/* Now, wake up all processes that sleep on a blocked lock */
	for (block = nlm_blocked; block; block = block->b_next) {
		if (block->b_host == host) {
			/* Tell them the grace period is over; they will
			 * resubmit the request themselves. */
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}