/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
                                 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
                                 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
                                         void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

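/* Hand out the next migration cookie.  A cookie ties together the one or
 * more network messages that carry a single lock resource's state during
 * recovery or migration.  It wraps from ~0 back to 1, so 0 is never
 * issued; a zero cookie is reserved for the dummy lock (see
 * dlm_add_dummy_lock/dlm_is_dummy_lock below). */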
static u64 dlm_get_next_mig_cookie(void)
{
        u64 c;
        spin_lock(&dlm_mig_cookie_lock);
        c = dlm_mig_cookie;
        if (dlm_mig_cookie == (~0ULL))
                dlm_mig_cookie = 1;
        else
                dlm_mig_cookie++;
        spin_unlock(&dlm_mig_cookie_lock);
        return c;
}

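/* Setters for the current recovery target (dead_node) and recovery master
 * (new_master).  Both must be called with dlm->spinlock held; changes are
 * logged. */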
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
                                          u8 dead_node)
{
        assert_spin_locked(&dlm->spinlock);
        if (dlm->reco.dead_node != dead_node)
                mlog(0, "%s: changing dead_node from %u to %u\n",
                     dlm->name, dlm->reco.dead_node, dead_node);
        dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
                                       u8 master)
{
        assert_spin_locked(&dlm->spinlock);
        mlog(0, "%s: changing new_master from %u to %u\n",
             dlm->name, dlm->reco.new_master, master);
        dlm->reco.new_master = master;
}

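/* Clear the recovery target: drop the dead node from the recovery map and
 * reset both dead_node and new_master to O2NM_INVALID_NODE_NUM.  The __
 * variant expects dlm->spinlock to already be held. */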
static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        assert_spin_locked(&dlm->spinlock);
        clear_bit(dlm->reco.dead_node, dlm->recovery_map);
        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        __dlm_reset_recovery(dlm);
        spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
        struct dlm_ctxt *dlm =
                container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct dlm_work_item *item, *next;
        dlm_workfunc_t *workfunc;
        int tot = 0;

        spin_lock(&dlm->work_lock);
        list_splice_init(&dlm->work_list, &tmp_list);
        spin_unlock(&dlm->work_lock);

        list_for_each_entry(item, &tmp_list, list) {
                tot++;
        }
        mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

        list_for_each_entry_safe(item, next, &tmp_list, list) {
                workfunc = item->func;
                list_del_init(&item->list);

                /* already have ref on dlm to avoid having
                 * it disappear.  just double-check. */
                BUG_ON(item->dlm != dlm);

                /* this is allowed to sleep and
                 * call network stuff */
                workfunc(item, item->data);

                dlm_put(dlm);
                kfree(item);
        }
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
        /* wake the recovery thread
         * this will wake the reco thread in one of three places
         * 1) sleeping with no recovery happening
         * 2) sleeping with recovery mastered elsewhere
         * 3) recovery mastered here, waiting on reco data */

        wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm recovery thread...\n");

        dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
                                                "dlm_reco_thread");
        if (IS_ERR(dlm->dlm_reco_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
                dlm->dlm_reco_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_reco_thread_task) {
                mlog(0, "waiting for dlm recovery thread to exit\n");
                kthread_stop(dlm->dlm_reco_thread_task);
                dlm->dlm_reco_thread_task = NULL;
        }
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
        struct dlm_reco_node_data *ndata;
        struct dlm_lock_resource *res;

        mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
             dlm->reco.dead_node, dlm->reco.new_master);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                char *st = "unknown";
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                                st = "init";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                st = "requesting";
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                st = "dead";
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                st = "receiving";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                st = "requested";
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                st = "done";
                                break;
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                st = "finalize-sent";
                                break;
                        default:
                                st = "bad";
                                break;
                }
                mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
                     dlm->name, ndata->node_num, st);
        }
        list_for_each_entry(res, &dlm->reco.resources, recovering) {
                mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
                     dlm->name, res->lockname.len, res->lockname.name);
        }
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

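/* Main loop of the per-domain recovery thread: once the domain is fully
 * joined, run dlm_do_recovery().  A return of -EAGAIN means another dead
 * node may still need recovery, so loop again immediately; otherwise sleep
 * until kicked or until the timeout expires. */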
static int dlm_recovery_thread(void *data)
{
        int status;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                if (dlm_domain_fully_joined(dlm)) {
                        status = dlm_do_recovery(dlm);
                        if (status == -EAGAIN) {
                                /* do not sleep, recheck immediately. */
                                continue;
                        }
                        if (status < 0)
                                mlog_errno(status);
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM recovery thread\n");
        return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
        int ready;
        spin_lock(&dlm->spinlock);
        ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
        spin_unlock(&dlm->spinlock);
        return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
        int dead;
        spin_lock(&dlm->spinlock);
        dead = !test_bit(node, dlm->domain_map);
        spin_unlock(&dlm->spinlock);
        return dead;
}

/* returns true if the node has already been recovered, i.e. it is
 * no longer set in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
        int recovered;
        spin_lock(&dlm->spinlock);
        recovered = !test_bit(node, dlm->recovery_map);
        spin_unlock(&dlm->spinlock);
        return recovered;
}

void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (dlm_is_node_dead(dlm, node))
                return;

        printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
               "domain %s\n", node, dlm->name);

        if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_dead(dlm, node),
                                   msecs_to_jiffies(timeout));
        else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (dlm_is_node_recovered(dlm, node))
                return;

        printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
               "domain %s\n", node, dlm->name);

        if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_recovered(dlm, node),
                                   msecs_to_jiffies(timeout));
        else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
        int in_recovery;
        spin_lock(&dlm->spinlock);
        in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        spin_unlock(&dlm->spinlock);
        return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
        if (dlm_in_recovery(dlm)) {
                mlog(0, "%s: reco thread %d in recovery: "
                     "state=%d, master=%u, dead=%u\n",
                     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
                     dlm->reco.state, dlm->reco.new_master,
                     dlm->reco.dead_node);
        }
        wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

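/* Flip the domain in and out of the ACTIVE recovery state.  While the flag
 * is set, dlm_wait_for_recovery() callers block on dlm->reco.event; the end
 * path clears the flag and wakes them. */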
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
               dlm->name, dlm->reco.dead_node);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
        printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
        wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
        printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
               "dead node %u in domain %s\n", dlm->reco.new_master,
               (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
               dlm->reco.dead_node, dlm->name);
}

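/* One pass of recovery: pick a dead node out of the recovery map, mark the
 * domain as recovering, then either master the recovery session ourselves
 * or wait for another node to do so.  Returns -EAGAIN after mastering a
 * pass here so the thread immediately checks for more dead nodes. */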
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
        int status = 0;
        int ret;

        spin_lock(&dlm->spinlock);

        /* check to see if the new master has died */
        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
            test_bit(dlm->reco.new_master, dlm->recovery_map)) {
                mlog(0, "new master %u died while recovering %u!\n",
                     dlm->reco.new_master, dlm->reco.dead_node);
                /* unset the new_master, leave dead_node */
                dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
        }

        /* select a target to recover */
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;

                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
                        dlm_set_reco_dead_node(dlm, bit);
        } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
                /* BUG? */
                mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
                     dlm->reco.dead_node);
                dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                // mlog(0, "nothing to recover!  sleeping now!\n");
                spin_unlock(&dlm->spinlock);
                /* return to main thread loop and sleep. */
                return 0;
        }
        mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.dead_node);
        spin_unlock(&dlm->spinlock);

        /* take write barrier */
        /* (stops the list reshuffling thread, proxy ast handling) */
        dlm_begin_recovery(dlm);

        if (dlm->reco.new_master == dlm->node_num)
                goto master_here;

        if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                /* choose a new master, returns 0 if this node
                 * is the master, -EEXIST if it's another node.
                 * this does not return until a new master is chosen
                 * or recovery completes entirely. */
                ret = dlm_pick_recovery_master(dlm);
                if (!ret) {
                        /* already notified everyone.  go. */
                        goto master_here;
                }
                mlog(0, "another node will master this recovery session.\n");
        }

        dlm_print_recovery_master(dlm);

        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
         * have been marked as in-recovery */
        dlm_end_recovery(dlm);

        /* sleep out in main dlm_recovery_thread loop. */
        return 0;

master_here:
        dlm_print_recovery_master(dlm);

        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
                mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
                     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
        } else {
                /* success!  see if any other nodes need recovery */
                mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
                     dlm->name, dlm->reco.dead_node, dlm->node_num);
                dlm_reset_recovery(dlm);
        }
        dlm_end_recovery(dlm);

        /* continue and look for another dead node */
        return -EAGAIN;
}

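/* Run recovery as the elected recovery master: build a per-node tracking
 * list, request the dead node's lock state from every live node, wait for
 * every node to reach the DONE state, then broadcast the finalize message
 * and take over the recovered lock resources locally. */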
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
        int status = 0;
        struct dlm_reco_node_data *ndata;
        int all_nodes_done;
        int destroy = 0;
        int pass = 0;

        do {
                /* we have become recovery master.  there is no escaping
                 * this, so just keep trying until we get it. */
                status = dlm_init_recovery_area(dlm, dead_node);
                if (status < 0) {
                        mlog(ML_ERROR, "%s: failed to alloc recovery area, "
                             "retrying\n", dlm->name);
                        msleep(1000);
                }
        } while (status != 0);

        /* safe to access the node data list without a lock, since this
         * process is the only one to change the list */
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

                mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
                     ndata->node_num);

                if (ndata->node_num == dlm->node_num) {
                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        continue;
                }

                do {
                        status = dlm_request_all_locks(dlm, ndata->node_num,
                                                       dead_node);
                        if (status < 0) {
                                mlog_errno(status);
                                if (dlm_is_host_down(status)) {
                                        /* node died, ignore it for recovery */
                                        status = 0;
                                        ndata->state = DLM_RECO_NODE_DATA_DEAD;
                                        /* wait for the domain map to catch up
                                         * with the network state. */
                                        wait_event_timeout(dlm->dlm_reco_thread_wq,
                                                           dlm_is_node_dead(dlm,
                                                                ndata->node_num),
                                                           msecs_to_jiffies(1000));
                                        mlog(0, "waited 1 sec for %u, "
                                             "dead? %s\n", ndata->node_num,
                                             dlm_is_node_dead(dlm, ndata->node_num) ?
                                             "yes" : "no");
                                } else {
                                        /* -ENOMEM on the other node */
                                        mlog(0, "%s: node %u returned "
                                             "%d during recovery, retrying "
                                             "after a short wait\n",
                                             dlm->name, ndata->node_num,
                                             status);
                                        msleep(100);
                                }
                        }
                } while (status != 0);

                spin_lock(&dlm_reco_state_lock);
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                BUG();
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                mlog(0, "node %u died after requesting "
                                     "recovery info for node %u\n",
                                     ndata->node_num, dead_node);
                                /* fine.  don't need this node's info.
                                 * continue without it. */
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
                                mlog(0, "now receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                mlog(0, "already receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                mlog(0, "already DONE receiving recovery data "
                                     "from node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                }
                spin_unlock(&dlm_reco_state_lock);
        }

        mlog(0, "%s: Done requesting all lock info\n", dlm->name);

        /* nodes should be sending reco data now
         * just need to wait */

        while (1) {
                /* check all the nodes now to see if we are
                 * done, or if anyone died */
                all_nodes_done = 1;
                spin_lock(&dlm_reco_state_lock);
                list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                        mlog(0, "checking recovery state of node %u\n",
                             ndata->node_num);
                        switch (ndata->state) {
                                case DLM_RECO_NODE_DATA_INIT:
                                case DLM_RECO_NODE_DATA_REQUESTING:
                                        mlog(ML_ERROR, "bad ndata state for "
                                             "node %u: state=%d\n",
                                             ndata->node_num, ndata->state);
                                        BUG();
                                        break;
                                case DLM_RECO_NODE_DATA_DEAD:
                                        mlog(0, "node %u died after "
                                             "requesting recovery info for "
                                             "node %u\n", ndata->node_num,
                                             dead_node);
                                        break;
                                case DLM_RECO_NODE_DATA_RECEIVING:
                                case DLM_RECO_NODE_DATA_REQUESTED:
                                        mlog(0, "%s: node %u still in state %s\n",
                                             dlm->name, ndata->node_num,
                                             ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
                                             "receiving" : "requested");
                                        all_nodes_done = 0;
                                        break;
                                case DLM_RECO_NODE_DATA_DONE:
                                        mlog(0, "%s: node %u state is done\n",
                                             dlm->name, ndata->node_num);
                                        break;
                                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                        mlog(0, "%s: node %u state is finalize\n",
                                             dlm->name, ndata->node_num);
                                        break;
                        }
                }
                spin_unlock(&dlm_reco_state_lock);

                mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
                     all_nodes_done?"yes":"no");
                if (all_nodes_done) {
                        int ret;

                        /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
                         * just send a finalize message to everyone and
                         * clean up */
                        mlog(0, "all nodes are done! send finalize\n");
                        ret = dlm_send_finalize_reco_message(dlm);
                        if (ret < 0)
                                mlog_errno(ret);

                        spin_lock(&dlm->spinlock);
                        dlm_finish_local_lockres_recovery(dlm, dead_node,
                                                          dlm->node_num);
                        spin_unlock(&dlm->spinlock);
                        mlog(0, "should be done with recovery!\n");

                        mlog(0, "finishing recovery of %s at %lu, "
                             "dead=%u, this=%u, new=%u\n", dlm->name,
                             jiffies, dlm->reco.dead_node,
                             dlm->node_num, dlm->reco.new_master);
                        destroy = 1;
                        status = 0;
                        /* rescan everything marked dirty along the way */
                        dlm_kick_thread(dlm, NULL);
                        break;
                }
                /* wait to be signalled, with periodic timeout
                 * to check for node death */
                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

        }

        if (destroy)
                dlm_destroy_recovery_area(dlm, dead_node);

        return status;
}

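/* Build the per-node recovery tracking list: snapshot the current domain
 * map and allocate a dlm_reco_node_data entry (state INIT) for every live
 * node except the dead one.  Torn down by dlm_destroy_recovery_area(). */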
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        int num = 0;
        struct dlm_reco_node_data *ndata;

        spin_lock(&dlm->spinlock);
        memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
        /* nodes can only be removed (by dying) after dropping
         * this lock, and death will be trapped later, so this should do */
        spin_unlock(&dlm->spinlock);

        while (1) {
                num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
                if (num >= O2NM_MAX_NODES) {
                        break;
                }
                BUG_ON(num == dead_node);

                ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
                if (!ndata) {
                        dlm_destroy_recovery_area(dlm, dead_node);
                        return -ENOMEM;
                }
                ndata->node_num = num;
                ndata->state = DLM_RECO_NODE_DATA_INIT;
                spin_lock(&dlm_reco_state_lock);
                list_add_tail(&ndata->list, &dlm->reco.node_data);
                spin_unlock(&dlm_reco_state_lock);
                num++;
        }

        return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        struct dlm_reco_node_data *ndata, *next;
        LIST_HEAD(tmplist);

        spin_lock(&dlm_reco_state_lock);
        list_splice_init(&dlm->reco.node_data, &tmplist);
        spin_unlock(&dlm_reco_state_lock);

        list_for_each_entry_safe(ndata, next, &tmplist, list) {
                list_del_init(&ndata->list);
                kfree(ndata);
        }
}

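/* Ask one live node (request_from) to ship us every lock it holds for the
 * dead node.  The lock state itself arrives later as DLM_MIG_LOCKRES
 * messages; this only fires the request. */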
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
                                 u8 dead_node)
{
        struct dlm_lock_request lr;
        int ret;

        mlog(0, "\n");


        mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
             "to %u\n", dead_node, request_from);

        memset(&lr, 0, sizeof(lr));
        lr.node_idx = dlm->node_num;
        lr.dead_node = dead_node;

        // send message
        ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
                                 &lr, sizeof(lr), request_from, NULL);

        /* negative status is handled by caller */
        if (ret < 0)
                mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
                     "to recover dead node %u\n", dlm->name, ret,
                     request_from, dead_node);

        // return from here, then
        // sleep until all received or error
        return ret;

}

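/* Handler for DLM_LOCK_REQUEST_MSG.  It runs in o2net's receive context and
 * so cannot block; it sanity-checks the requested dead node and queues
 * dlm_request_all_locks_worker() to do the actual (sleeping) work. */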
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (lr->dead_node != dlm->reco.dead_node) {
                mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
                     "dead_node is %u\n", dlm->name, lr->node_idx,
                     lr->dead_node, dlm->reco.dead_node);
                dlm_print_reco_node_status(dlm);
                /* this is a hack */
                dlm_put(dlm);
                return -ENOMEM;
        }
        BUG_ON(lr->dead_node != dlm->reco.dead_node);

        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!item) {
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* this will get freed by dlm_request_all_locks_worker */
        buf = (char *) __get_free_page(GFP_NOFS);
        if (!buf) {
                kfree(item);
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* queue up work for dlm_request_all_locks_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
        item->u.ral.reco_master = lr->node_idx;
        item->u.ral.dead_node = lr->dead_node;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

        dlm_put(dlm);
        return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_migratable_lockres *mres;
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;
        LIST_HEAD(resources);
        int ret;
        u8 dead_node, reco_master;
        int skip_all_done = 0;

        dlm = item->dlm;
        dead_node = item->u.ral.dead_node;
        reco_master = item->u.ral.reco_master;
        mres = (struct dlm_migratable_lockres *)data;

        mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
             dlm->name, dead_node, reco_master);

        if (dead_node != dlm->reco.dead_node ||
            reco_master != dlm->reco.new_master) {
                /* worker could have been created before the recovery master
                 * died.  if so, do not continue, but do not error. */
                if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                        mlog(ML_NOTICE, "%s: will not send recovery state, "
                             "recovery master %u died, thread=(dead=%u,mas=%u)"
                             " current=(dead=%u,mas=%u)\n", dlm->name,
                             reco_master, dead_node, reco_master,
                             dlm->reco.dead_node, dlm->reco.new_master);
                } else {
                        mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
                             "master=%u), request(dead=%u, master=%u)\n",
                             dlm->name, dlm->reco.dead_node,
                             dlm->reco.new_master, dead_node, reco_master);
                }
                goto leave;
        }

        /* lock resources should have already been moved to the
         * dlm->reco.resources list.  now move items from that list
         * to a temp list if the dead owner matches.  note that the
         * whole cluster recovers only one node at a time, so we
         * can safely move UNKNOWN lock resources for each recovery
         * session. */
        dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

        /* now we can begin blasting lockreses without the dlm lock */

        /* any errors returned will be due to the new_master dying,
         * the dlm_reco_thread should detect this */
        list_for_each_entry(res, &resources, recovering) {
                ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                           DLM_MRES_RECOVERY);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery state for dead node %u, ret=%d\n", dlm->name,
                             reco_master, dead_node, ret);
                        skip_all_done = 1;
                        break;
                }
        }

        /* move the resources back to the list */
        spin_lock(&dlm->spinlock);
        list_splice_init(&resources, &dlm->reco.resources);
        spin_unlock(&dlm->spinlock);

        if (!skip_all_done) {
                ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery all-done for dead node %u, ret=%d\n",
                             dlm->name, reco_master, dead_node, ret);
                }
        }
leave:
        free_page((unsigned long)data);
}


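/* Tell the recovery master (send_to) that this node has finished sending
 * all of its lock state for the dead node (DLM_RECO_DATA_DONE_MSG). */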
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
        int ret, tmpret;
        struct dlm_reco_data_done done_msg;

        memset(&done_msg, 0, sizeof(done_msg));
        done_msg.node_idx = dlm->node_num;
        done_msg.dead_node = dead_node;
        mlog(0, "sending DATA DONE message to %u, "
             "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
             done_msg.dead_node);

        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
                mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
                     "to recover dead node %u\n", dlm->name, ret, send_to,
                     dead_node);
                if (!dlm_is_host_down(ret)) {
                        BUG();
                }
        } else
                ret = tmpret;
        return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
        struct dlm_reco_node_data *ndata = NULL;
        int ret = -EINVAL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
             "node_idx=%u, this node=%u\n", done->dead_node,
             dlm->reco.dead_node, done->node_idx, dlm->node_num);

        mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
                        "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
                        "node_idx=%u, this node=%u\n", done->dead_node,
                        dlm->reco.dead_node, done->node_idx, dlm->node_num);

        spin_lock(&dlm_reco_state_lock);
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                if (ndata->node_num != done->node_idx)
                        continue;

                switch (ndata->state) {
                        /* should have moved beyond INIT but not to FINALIZE yet */
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_DEAD:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                mlog(ML_ERROR, "bad ndata state for node %u:"
                                     " state=%d\n", ndata->node_num,
                                     ndata->state);
                                BUG();
                                break;
                        /* these states are possible at this point, anywhere along
                         * the line of recovery */
                        case DLM_RECO_NODE_DATA_DONE:
                        case DLM_RECO_NODE_DATA_RECEIVING:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                mlog(0, "node %u is DONE sending "
                                     "recovery data!\n",
                                     ndata->node_num);

                                ndata->state = DLM_RECO_NODE_DATA_DONE;
                                ret = 0;
                                break;
                }
        }
        spin_unlock(&dlm_reco_state_lock);

        /* wake the recovery thread, some node is done */
        if (!ret)
                dlm_kick_recovery_thread(dlm);

        if (ret < 0)
                mlog(ML_ERROR, "failed to find recovery node data for node "
                     "%u\n", done->node_idx);
        dlm_put(dlm);

        mlog(0, "leaving reco data done handler, ret=%d\n", ret);
        return ret;
}

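/* Collect the lock resources that must be shipped to the recovery master:
 * move every lockres owned by the dead node (or with an unknown owner) from
 * dlm->reco.resources onto the caller's list.  $RECOVERY lock entries held
 * by the dead node are pruned here to avoid hangs in later recovery. */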
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list,
                                        u8 dead_node)
{
        struct dlm_lock_resource *res, *next;
        struct dlm_lock *lock;

        spin_lock(&dlm->spinlock);
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                /* always prune any $RECOVERY entries for dead nodes,
                 * otherwise hangs can occur during later recovery */
                if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        spin_lock(&res->spinlock);
                        list_for_each_entry(lock, &res->granted, list) {
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
                                             "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
                                        break;
                                }
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }

                if (res->owner == dead_node) {
                        mlog(0, "found lockres owned by dead node while "
                             "doing recovery for node %u. sending it.\n",
                             dead_node);
                        list_move_tail(&res->recovering, list);
                } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "found UNKNOWN owner while doing recovery "
                             "for node %u. sending it.\n", dead_node);
                        list_move_tail(&res->recovering, list);
                }
        }
        spin_unlock(&dlm->spinlock);
}

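/* Count every lock on the resource's granted, converting and blocked
 * queues; this relies on the three list heads being laid out consecutively
 * in struct dlm_lock_resource. */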
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
        int total_locks = 0;
        struct list_head *iter, *queue = &res->granted;
        int i;

        for (i = 0; i < 3; i++) {
                list_for_each(iter, queue)
                        total_locks++;
                queue++;
        }
        return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks)
{
        u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
        int mres_total_locks = be32_to_cpu(mres->total_locks);
        int sz, ret = 0, status = 0;
        u8 orig_flags = mres->flags,
           orig_master = mres->master;

        BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
        if (!mres->num_locks)
                return 0;

        sz = sizeof(struct dlm_migratable_lockres) +
                (mres->num_locks * sizeof(struct dlm_migratable_lock));

        /* add an all-done flag if we reached the last lock */
        orig_flags = mres->flags;
        BUG_ON(total_locks > mres_total_locks);
        if (total_locks == mres_total_locks)
                mres->flags |= DLM_MRES_ALL_DONE;

        mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
             send_to);

        /* send it */
        ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
                                 sz, send_to, &status);
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
                mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
                     "node %u (%s)\n", dlm->name, mres->lockname_len,
                     mres->lockname, ret, send_to,
                     (orig_flags & DLM_MRES_MIGRATION ?
                      "migration" : "recovery"));
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
                if (ret < 0) {
                        mlog_errno(ret);

                        if (ret == -EFAULT) {
                                mlog(ML_ERROR, "node %u told me to kill "
                                     "myself!\n", send_to);
                                BUG();
                        }
                }
        }

        /* zero and reinit the message buffer */
        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, mres_total_locks,
                                    mig_cookie, orig_flags, orig_master);
        return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master)
{
        /* mres here is one full page */
        clear_page(mres);
        mres->lockname_len = namelen;
        memcpy(mres->lockname, lockname, namelen);
        mres->num_locks = 0;
        mres->total_locks = cpu_to_be32(total_locks);
        mres->mig_cookie = cpu_to_be64(cookie);
        mres->flags = flags;
        mres->master = master;
}

static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
                                          struct dlm_migratable_lockres *mres,
                                          int queue)
{
        if (!lock->lksb)
                return;

        /* Ignore lvb in all locks in the blocked list */
        if (queue == DLM_BLOCKED_LIST)
                return;

        /* Only consider lvbs in locks with granted EX or PR lock levels */
        if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
                return;

        if (dlm_lvb_is_empty(mres->lvb)) {
                memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
                return;
        }

        /* Ensure the lvb copied for migration matches in other valid locks */
        if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
                return;

        mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
             "node=%u\n",
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
             lock->lockres->lockname.len, lock->lockres->lockname.name,
             lock->ml.node);
        dlm_print_one_lock_resource(lock->lockres);
        BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
                                 struct dlm_migratable_lockres *mres, int queue)
{
        struct dlm_migratable_lock *ml;
        int lock_num = mres->num_locks;

        ml = &(mres->ml[lock_num]);
        ml->cookie = lock->ml.cookie;
        ml->type = lock->ml.type;
        ml->convert_type = lock->ml.convert_type;
        ml->highest_blocked = lock->ml.highest_blocked;
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
                dlm_prepare_lvb_for_migration(lock, mres, queue);
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
        /* we reached the max, send this network message */
        if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
                return 1;
        return 0;
}

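/* A "dummy" lock (cookie 0, all modes IVMODE, queued on the blocked list)
 * is sent when a lockres has no real locks to migrate; it tells the
 * receiver that the sender still holds a mastery reference for it.
 * dlm_is_dummy_lock() recognizes that pattern on the receive side. */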
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
                               struct dlm_migratable_lockres *mres)
{
        struct dlm_lock dummy;
        memset(&dummy, 0, sizeof(dummy));
        dummy.ml.cookie = 0;
        dummy.ml.type = LKM_IVMODE;
        dummy.ml.convert_type = LKM_IVMODE;
        dummy.ml.highest_blocked = LKM_IVMODE;
        dummy.lksb = NULL;
        dummy.ml.node = dlm->node_num;
        dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lock *ml,
                                    u8 *nodenum)
{
        if (unlikely(ml->cookie == 0 &&
            ml->type == LKM_IVMODE &&
            ml->convert_type == LKM_IVMODE &&
            ml->highest_blocked == LKM_IVMODE &&
            ml->list == DLM_BLOCKED_LIST)) {
                *nodenum = ml->node;
                return 1;
        }
        return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to, u8 flags)
{
        struct list_head *queue;
        int total_locks, i;
        u64 mig_cookie = 0;
        struct dlm_lock *lock;
        int ret = 0;

        BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        mlog(0, "sending to %u\n", send_to);

        total_locks = dlm_num_locks_in_lockres(res);
        if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
                /* rare, but possible */
                mlog(0, "argh.  lockres has %d locks.  this will "
                     "require more than one network packet to "
                     "migrate\n", total_locks);
                mig_cookie = dlm_get_next_mig_cookie();
        }

        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, total_locks,
                                    mig_cookie, flags, res->owner);

        total_locks = 0;
        for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each_entry(lock, queue, list) {
                        /* add another lock. */
                        total_locks++;
                        if (!dlm_add_lock_to_array(lock, mres, i))
                                continue;

                        /* this filled the lock message,
                         * we must send it immediately. */
                        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
                                                       res, total_locks);
                        if (ret < 0)
                                goto error;
                }
        }
        if (total_locks == 0) {
                /* send a dummy lock to indicate a mastery reference only */
                mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
                     "migration");
                dlm_add_dummy_lock(dlm, mres);
        }
        /* flush any remaining locks */
        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
        if (ret < 0)
                goto error;
        return ret;

error:
        mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
             dlm->name, ret);
        if (!dlm_is_host_down(ret))
                BUG();
        mlog(0, "%s: node %u went down while sending %s "
             "lockres %.*s\n", dlm->name, send_to,
             flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
             res->lockname.len, res->lockname.name);
        return ret;
}


/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary.
 * we really cannot afford to fail an alloc in recovery;
 * do we spin?  returning an error only delays the problem really.
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_migratable_lockres *mres =
                (struct dlm_migratable_lockres *)msg->buf;
        int ret = 0;
        u8 real_master;
        u8 extra_refs = 0;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;
        struct dlm_lock_resource *res = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        real_master = mres->master;
        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* cannot migrate a lockres with no master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
        }

        mlog(0, "%s message received from node %u\n",
             (mres->flags & DLM_MRES_RECOVERY) ?
             "recovery" : "migration", mres->master);
        if (mres->flags & DLM_MRES_ALL_DONE)
                mlog(0, "all done flag.  all lockres data received!\n");

        ret = -ENOMEM;
        buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!buf || !item)
                goto leave;

        /* lookup the lock to see if we have a secondary queue for this
         * already...  just add the locks in and this will have its owner
         * and RECOVERY flag changed when it completes. */
        res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
        if (res) {
                /* this will get a ref on res */
                /* mark it as recovering/migrating and hash it */
                spin_lock(&res->spinlock);
                if (mres->flags & DLM_MRES_RECOVERY) {
                        res->state |= DLM_LOCK_RES_RECOVERING;
                } else {
                        if (res->state & DLM_LOCK_RES_MIGRATING) {
                                /* this is at least the second
                                 * lockres message */
                                mlog(0, "lock %.*s is already migrating\n",
                                     mres->lockname_len,
                                     mres->lockname);
                        } else if (res->state & DLM_LOCK_RES_RECOVERING) {
                                /* caller should BUG */
                                mlog(ML_ERROR, "node is attempting to migrate "
                                     "lock %.*s, but marked as recovering!\n",
                                     mres->lockname_len, mres->lockname);
                                ret = -EFAULT;
                                spin_unlock(&res->spinlock);
                                dlm_lockres_put(res);
                                goto leave;
                        }
                        res->state |= DLM_LOCK_RES_MIGRATING;
                }
                spin_unlock(&res->spinlock);
        } else {
                /* need to allocate, just like if it was
                 * mastered here normally */
                res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
                if (!res)
                        goto leave;

                /* to match the ref that we would have gotten if
                 * dlm_lookup_lockres had succeeded */
                dlm_lockres_get(res);

                /* mark it as recovering/migrating and hash it */
                if (mres->flags & DLM_MRES_RECOVERY)
                        res->state |= DLM_LOCK_RES_RECOVERING;
                else
                        res->state |= DLM_LOCK_RES_MIGRATING;

                spin_lock(&dlm->spinlock);
                __dlm_insert_lockres(dlm, res);
                spin_unlock(&dlm->spinlock);

                /* Add an extra ref for this lock-less lockres lest the
                 * dlm_thread purges it before we get the chance to add
                 * locks to it */
                dlm_lockres_get(res);

                /* There are three refs that need to be put.
                 * 1. Taken above.
                 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
                 * 3. dlm_lookup_lockres()
                 * The first one is handled at the end of this function. The
                 * other two are handled in the worker thread after locks have
                 * been attached. Yes, we don't wait for purge time to match
                 * kref_init. The lockres will still have at least one ref
                 * added because it is in the hash __dlm_insert_lockres() */
                extra_refs++;

                /* now that the new lockres is inserted,
                 * make it usable by other processes */
                spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
        }

        /* at this point we have allocated everything we need,
         * and we have a hashed lockres with an extra ref and
         * the proper res->state flags. */
        ret = 0;
        spin_lock(&res->spinlock);
        /* drop this either when master requery finds a different master
         * or when a lock is added by the recovery worker */
        dlm_lockres_grab_inflight_ref(dlm, res);
        if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* migration cannot have an unknown master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
                mlog(0, "recovery has passed me a lockres with an "
                     "unknown owner.. will need to requery: "
                     "%.*s\n", mres->lockname_len, mres->lockname);
        } else {
                /* take a reference now to pin the lockres, drop it
                 * when locks are added in the worker */
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
        }
        spin_unlock(&res->spinlock);

        /* queue up work for dlm_mig_lockres_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
        dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
        item->u.ml.lockres = res; /* already have a ref */
        item->u.ml.real_master = real_master;
        item->u.ml.extra_ref = extra_refs;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
        /* One extra ref taken needs to be put here */
        if (extra_refs)
                dlm_lockres_put(res);

        dlm_put(dlm);
        if (ret < 0) {
                kfree(buf);
                kfree(item);
                mlog_errno(ret);
        }

        return ret;
}


1507 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) | |
1508 | { | |
52987e2a | 1509 | struct dlm_ctxt *dlm; |
6714d8e8 KH |
1510 | struct dlm_migratable_lockres *mres; |
1511 | int ret = 0; | |
1512 | struct dlm_lock_resource *res; | |
1513 | u8 real_master; | |
52987e2a | 1514 | u8 extra_ref; |
6714d8e8 KH |
1515 | |
1516 | dlm = item->dlm; | |
1517 | mres = (struct dlm_migratable_lockres *)data; | |
1518 | ||
1519 | res = item->u.ml.lockres; | |
1520 | real_master = item->u.ml.real_master; | |
52987e2a | 1521 | extra_ref = item->u.ml.extra_ref; |
6714d8e8 KH |
1522 | |
1523 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1524 | /* this case is super-rare. only occurs if | |
1525 | * node death happens during migration. */ | |
1526 | again: | |
1527 | ret = dlm_lockres_master_requery(dlm, res, &real_master); | |
1528 | if (ret < 0) { | |
e2faea4c | 1529 | mlog(0, "dlm_lockres_master_requery ret=%d\n", |
6714d8e8 KH |
1530 | ret); |
1531 | goto again; | |
1532 | } | |
1533 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1534 | mlog(0, "lockres %.*s not claimed. " | |
1535 | "this node will take it.\n", | |
1536 | res->lockname.len, res->lockname.name); | |
1537 | } else { | |
ba2bf218 KH |
1538 | spin_lock(&res->spinlock); |
1539 | dlm_lockres_drop_inflight_ref(dlm, res); | |
1540 | spin_unlock(&res->spinlock); | |
6714d8e8 KH |
1541 | mlog(0, "master needs to respond to sender " |
1542 | "that node %u still owns %.*s\n", | |
1543 | real_master, res->lockname.len, | |
1544 | res->lockname.name); | |
1545 | /* cannot touch this lockres */ | |
1546 | goto leave; | |
1547 | } | |
1548 | } | |
1549 | ||
1550 | ret = dlm_process_recovery_data(dlm, res, mres); | |
1551 | if (ret < 0) | |
1552 | mlog(0, "dlm_process_recovery_data returned %d\n", ret); | |
1553 | else | |
1554 | mlog(0, "dlm_process_recovery_data succeeded\n"); | |
1555 | ||
1556 | if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == | |
1557 | (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { | |
1558 | ret = dlm_finish_migration(dlm, res, mres->master); | |
1559 | if (ret < 0) | |
1560 | mlog_errno(ret); | |
1561 | } | |
1562 | ||
1563 | leave: | |
52987e2a SM |
1564 | /* See comment in dlm_mig_lockres_handler() */ |
1565 | if (res) { | |
1566 | if (extra_ref) | |
1567 | dlm_lockres_put(res); | |
1568 | dlm_lockres_put(res); | |
1569 | } | |
6714d8e8 | 1570 | kfree(data); |
6714d8e8 KH |
1571 | } |
1572 | ||
1573 | ||
1574 | ||
8169cae5 AB |
1575 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, |
1576 | struct dlm_lock_resource *res, | |
1577 | u8 *real_master) | |
6714d8e8 KH |
1578 | { |
1579 | struct dlm_node_iter iter; | |
1580 | int nodenum; | |
1581 | int ret = 0; | |
1582 | ||
1583 | *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; | |
1584 | ||
1585 | /* we only reach here if one of the two nodes in a | |
1586 | * migration died while the migration was in progress. | |
1587 | * at this point we need to requery the master. we | |
1588 | * know that the new_master got as far as creating | |
1589 | * an mle on at least one node, but we do not know | |
1590 | * if any nodes had actually cleared the mle and set | |
1591 | * the master to the new_master. the old master | |
1592 | * is supposed to set the owner to UNKNOWN in the | |
1593 | * event of a new_master death, so the only possible | |
1594 | * responses that we can get from nodes here are | |
1595 | * that the master is new_master, or that the master | |
1596 | * is UNKNOWN. | |
1597 | * if all nodes come back with UNKNOWN then we know | |
1598 | * the lock needs remastering here. | |
1599 | * if any node comes back with a valid master, check | |
1600 | * to see if that master is the one that we are | |
1601 | * recovering. if so, then the new_master died and | |
1602 | * we need to remaster this lock. if not, then the | |
1603 | * new_master survived and that node will respond to | |
1604 | * other nodes about the owner. | |
1605 | * if there is an owner, this node needs to dump this | |
1606 | * lockres and alert the sender that this lockres | |
1607 | * was rejected. */ | |
1608 | spin_lock(&dlm->spinlock); | |
1609 | dlm_node_iter_init(dlm->domain_map, &iter); | |
1610 | spin_unlock(&dlm->spinlock); | |
1611 | ||
1612 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | |
1613 | /* do not send to self */ | |
1614 | if (nodenum == dlm->node_num) | |
1615 | continue; | |
1616 | ret = dlm_do_master_requery(dlm, res, nodenum, real_master); | |
1617 | if (ret < 0) { | |
1618 | mlog_errno(ret); | |
c03872f5 KH |
1619 | if (!dlm_is_host_down(ret)) |
1620 | BUG(); | |
1621 | /* host is down, so answer for that node would be | |
1622 | * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ | |
6714d8e8 KH |
1623 | } |
1624 | if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1625 | mlog(0, "lock master is %u\n", *real_master); | |
1626 | break; | |
1627 | } | |
1628 | } | |
1629 | return ret; | |
1630 | } | |
1631 | ||
1632 | ||
c03872f5 KH |
1633 | int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, |
1634 | u8 nodenum, u8 *real_master) | |
6714d8e8 KH |
1635 | { |
1636 | int ret = -EINVAL; | |
1637 | struct dlm_master_requery req; | |
1638 | int status = DLM_LOCK_RES_OWNER_UNKNOWN; | |
1639 | ||
1640 | memset(&req, 0, sizeof(req)); | |
1641 | req.node_idx = dlm->node_num; | |
1642 | req.namelen = res->lockname.len; | |
1643 | memcpy(req.name, res->lockname.name, res->lockname.len); | |
1644 | ||
1645 | ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, | |
1646 | &req, sizeof(req), nodenum, &status); | |
1647 | /* XXX: negative status not handled properly here. */ | |
1648 | if (ret < 0) | |
a5196ec5 WW |
1649 | mlog(ML_ERROR, "Error %d when sending message %u (key " |
1650 | "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG, | |
1651 | dlm->key, nodenum); | |
6714d8e8 KH |
1652 | else { |
1653 | BUG_ON(status < 0); | |
1654 | BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); | |
1655 | *real_master = (u8) (status & 0xff); | |
1656 | mlog(0, "node %u responded to master requery with %u\n", | |
1657 | nodenum, *real_master); | |
1658 | ret = 0; | |
1659 | } | |
1660 | return ret; | |
1661 | } | |
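/* Note on the requery answer path: dlm_master_requery_handler() on the
 * remote node returns either the owner's node number or
 * DLM_LOCK_RES_OWNER_UNKNOWN, and o2net hands that return value back to
 * the sender in 'status' above.  A negative transport/handler status
 * would trip the BUG_ON(status < 0), which is what the XXX comment above
 * is pointing at. */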
1662 | ||
1663 | ||
1664 | /* this function cannot error, so unless the sending | |
1665 | * or receiving of the message failed, the owner can | |
1666 | * be trusted */ | |
d74c9803 KH |
1667 | int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, |
1668 | void **ret_data) | |
6714d8e8 KH |
1669 | { |
1670 | struct dlm_ctxt *dlm = data; | |
1671 | struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; | |
1672 | struct dlm_lock_resource *res = NULL; | |
a3d33291 | 1673 | unsigned int hash; |
6714d8e8 KH |
1674 | int master = DLM_LOCK_RES_OWNER_UNKNOWN; |
1675 | u32 flags = DLM_ASSERT_MASTER_REQUERY; | |
1676 | ||
1677 | if (!dlm_grab(dlm)) { | |
1678 | /* since the domain has gone away on this | |
1679 | * node, the proper response is UNKNOWN */ | |
1680 | return master; | |
1681 | } | |
1682 | ||
a3d33291 MF |
1683 | hash = dlm_lockid_hash(req->name, req->namelen); |
1684 | ||
6714d8e8 | 1685 | spin_lock(&dlm->spinlock); |
a3d33291 | 1686 | res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); |
6714d8e8 KH |
1687 | if (res) { |
1688 | spin_lock(&res->spinlock); | |
1689 | master = res->owner; | |
1690 | if (master == dlm->node_num) { | |
1691 | int ret = dlm_dispatch_assert_master(dlm, res, | |
1692 | 0, 0, flags); | |
1693 | if (ret < 0) { | |
1694 | mlog_errno(-ENOMEM); | |
1695 | /* retry!? */ | |
1696 | BUG(); | |
1697 | } | |
52987e2a SM |
1698 | } else /* put.. in case we are not the master */ | |
1699 | dlm_lockres_put(res); | |
6714d8e8 KH |
1700 | spin_unlock(&res->spinlock); |
1701 | } | |
1702 | spin_unlock(&dlm->spinlock); | |
1703 | ||
1704 | dlm_put(dlm); | |
1705 | return master; | |
1706 | } | |
1707 | ||
1708 | static inline struct list_head * | |
1709 | dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) | |
1710 | { | |
1711 | struct list_head *ret; | |
1712 | BUG_ON(list_num < 0); | |
1713 | BUG_ON(list_num > 2); | |
1714 | ret = &(res->granted); | |
1715 | ret += list_num; | |
1716 | return ret; | |
1717 | } | |
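/* The pointer arithmetic above depends on the granted, converting and
 * blocked list heads sitting next to each other in struct
 * dlm_lock_resource, so list_num 0, 1 and 2 (DLM_GRANTED_LIST,
 * DLM_CONVERTING_LIST, DLM_BLOCKED_LIST) select &res->granted,
 * &res->converting and &res->blocked.  An equivalent, more explicit
 * sketch:
 *
 *	switch (list_num) {
 *	case DLM_GRANTED_LIST:    return &res->granted;
 *	case DLM_CONVERTING_LIST: return &res->converting;
 *	case DLM_BLOCKED_LIST:    return &res->blocked;
 *	}
 */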
1718 | /* TODO: do ast flush business | |
1719 | * TODO: do MIGRATING and RECOVERING spinning | |
1720 | */ | |
1721 | ||
1722 | /* | |
1723 | * NOTE about in-flight requests during migration: | |
1724 | * | |
1725 | * Before attempting the migrate, the master has marked the lockres as | |
1726 | * MIGRATING and then flushed all of its pending ASTS. So any in-flight | |
1727 | * requests either got queued before the MIGRATING flag got set, in which | |
1728 | * case the lock data will reflect the change and a return message is on | |
1729 | * the way, or the request failed to get in before MIGRATING got set. In | |
1730 | * this case, the caller will be told to spin and wait for the MIGRATING | |
1731 | * flag to be dropped, then recheck the master. | |
1732 | * This holds true for the convert, cancel and unlock cases, and since lvb | |
1733 | * updates are tied to these same messages, it applies to lvb updates as | |
1734 | * well. For the lock case, there is no way a lock can be on the master | |
1735 | * queue and not be on the secondary queue since the lock is always added | |
1736 | * locally first. This means that the new target node will never be sent | |
1737 | * a lock that he doesn't already have on the list. | |
1738 | * In total, this means that the local lock is correct and should not be | |
1739 | * updated to match the one sent by the master. Any messages sent back | |
1740 | * from the master before the MIGRATING flag will bring the lock properly | |
1741 | * up-to-date, and the change will be ordered properly for the waiter. | |
1742 | * We will *not* attempt to modify the lock underneath the waiter. | |
1743 | */ | |
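/* For example (illustration only): node A is migrating a lockres to node
 * B while node C has a convert in flight.  Either A queued the convert
 * before setting MIGRATING, so the state shipped to B already reflects it
 * and C will still get A's reply, or the convert arrived after MIGRATING
 * was set, so C is told to spin, waits for MIGRATING to clear and resends
 * to the new master B.  Either way B never has to modify C's lock
 * underneath C, which is exactly what the NOTE above relies on. */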
1744 | ||
1745 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | |
1746 | struct dlm_lock_resource *res, | |
1747 | struct dlm_migratable_lockres *mres) | |
1748 | { | |
1749 | struct dlm_migratable_lock *ml; | |
1750 | struct list_head *queue; | |
e17e75ec | 1751 | struct list_head *tmpq = NULL; |
6714d8e8 KH |
1752 | struct dlm_lock *newlock = NULL; |
1753 | struct dlm_lockstatus *lksb = NULL; | |
1754 | int ret = 0; | |
e17e75ec | 1755 | int i, j, bad; |
6714d8e8 | 1756 | struct dlm_lock *lock = NULL; |
ba2bf218 KH |
1757 | u8 from = O2NM_MAX_NODES; |
1758 | unsigned int added = 0; | |
26636bf6 | 1759 | __be64 c; |
6714d8e8 KH |
1760 | |
1761 | mlog(0, "running %d locks for this lockres\n", mres->num_locks); | |
1762 | for (i=0; i<mres->num_locks; i++) { | |
1763 | ml = &(mres->ml[i]); | |
ba2bf218 KH |
1764 | |
1765 | if (dlm_is_dummy_lock(dlm, ml, &from)) { | |
1766 | /* placeholder, just need to set the refmap bit */ | |
1767 | BUG_ON(mres->num_locks != 1); | |
1768 | mlog(0, "%s:%.*s: dummy lock for %u\n", | |
1769 | dlm->name, mres->lockname_len, mres->lockname, | |
1770 | from); | |
1771 | spin_lock(&res->spinlock); | |
8d400b81 | 1772 | dlm_lockres_set_refmap_bit(dlm, res, from); |
ba2bf218 KH |
1773 | spin_unlock(&res->spinlock); |
1774 | added++; | |
1775 | break; | |
1776 | } | |
6714d8e8 KH |
1777 | BUG_ON(ml->highest_blocked != LKM_IVMODE); |
1778 | newlock = NULL; | |
1779 | lksb = NULL; | |
1780 | ||
1781 | queue = dlm_list_num_to_pointer(res, ml->list); | |
e17e75ec | 1782 | tmpq = NULL; |
6714d8e8 KH |
1783 | |
1784 | /* if the lock is for the local node it needs to | |
1785 | * be moved to the proper location within the queue. | |
1786 | * do not allocate a new lock structure. */ | |
1787 | if (ml->node == dlm->node_num) { | |
1788 | /* MIGRATION ONLY! */ | |
1789 | BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); | |
1790 | ||
1791 | spin_lock(&res->spinlock); | |
e17e75ec KH |
1792 | for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { |
1793 | tmpq = dlm_list_idx_to_ptr(res, j); | |
800deef3 | 1794 | list_for_each_entry(lock, tmpq, list) { |
e17e75ec KH |
1795 | if (lock->ml.cookie != ml->cookie) |
1796 | lock = NULL; | |
1797 | else | |
1798 | break; | |
1799 | } | |
1800 | if (lock) | |
6714d8e8 KH |
1801 | break; |
1802 | } | |
1803 | ||
1804 | /* lock is always created locally first, and | |
1805 | * destroyed locally last. it must be on the list */ | |
1806 | if (!lock) { | |
26636bf6 SM |
1807 | c = ml->cookie; |
1808 | mlog(ML_ERROR, "Could not find local lock " | |
1809 | "with cookie %u:%llu, node %u, " | |
1810 | "list %u, flags 0x%x, type %d, " | |
1811 | "conv %d, highest blocked %d\n", | |
74aa2585 | 1812 | dlm_get_lock_cookie_node(be64_to_cpu(c)), |
26636bf6 SM |
1813 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), |
1814 | ml->node, ml->list, ml->flags, ml->type, | |
1815 | ml->convert_type, ml->highest_blocked); | |
1816 | __dlm_print_one_lock_resource(res); | |
1817 | BUG(); | |
1818 | } | |
1819 | ||
1820 | if (lock->ml.node != ml->node) { | |
1821 | c = lock->ml.cookie; | |
1822 | mlog(ML_ERROR, "Mismatched node# in lock " | |
1823 | "cookie %u:%llu, name %.*s, node %u\n", | |
1824 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | |
1825 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | |
1826 | res->lockname.len, res->lockname.name, | |
1827 | lock->ml.node); | |
1828 | c = ml->cookie; | |
1829 | mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " | |
1830 | "node %u, list %u, flags 0x%x, type %d, " | |
1831 | "conv %d, highest blocked %d\n", | |
1832 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | |
1833 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | |
1834 | ml->node, ml->list, ml->flags, ml->type, | |
1835 | ml->convert_type, ml->highest_blocked); | |
71ac1062 | 1836 | __dlm_print_one_lock_resource(res); |
6714d8e8 KH |
1837 | BUG(); |
1838 | } | |
6714d8e8 | 1839 | |
e17e75ec | 1840 | if (tmpq != queue) { |
26636bf6 SM |
1841 | c = ml->cookie; |
1842 | mlog(0, "Lock cookie %u:%llu was on list %u " | |
1843 | "instead of list %u for %.*s\n", | |
1844 | dlm_get_lock_cookie_node(be64_to_cpu(c)), | |
1845 | dlm_get_lock_cookie_seq(be64_to_cpu(c)), | |
1846 | j, ml->list, res->lockname.len, | |
1847 | res->lockname.name); | |
1848 | __dlm_print_one_lock_resource(res); | |
e17e75ec KH |
1849 | spin_unlock(&res->spinlock); |
1850 | continue; | |
1851 | } | |
1852 | ||
6714d8e8 KH |
1853 | /* see NOTE above about why we do not update |
1854 | * to match the master here */ | |
1855 | ||
1856 | /* move the lock to its proper place */ | |
1857 | /* do not alter lock refcount. switching lists. */ | |
f116629d | 1858 | list_move_tail(&lock->list, queue); |
6714d8e8 | 1859 | spin_unlock(&res->spinlock); |
ba2bf218 | 1860 | added++; |
6714d8e8 KH |
1861 | |
1862 | mlog(0, "just reordered a local lock!\n"); | |
1863 | continue; | |
1864 | } | |
1865 | ||
1866 | /* lock is for another node. */ | |
1867 | newlock = dlm_new_lock(ml->type, ml->node, | |
1868 | be64_to_cpu(ml->cookie), NULL); | |
1869 | if (!newlock) { | |
1870 | ret = -ENOMEM; | |
1871 | goto leave; | |
1872 | } | |
1873 | lksb = newlock->lksb; | |
1874 | dlm_lock_attach_lockres(newlock, res); | |
1875 | ||
1876 | if (ml->convert_type != LKM_IVMODE) { | |
1877 | BUG_ON(queue != &res->converting); | |
1878 | newlock->ml.convert_type = ml->convert_type; | |
1879 | } | |
1880 | lksb->flags |= (ml->flags & | |
1881 | (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | |
ccd8b1f9 KH |
1882 | |
1883 | if (ml->type == LKM_NLMODE) | |
1884 | goto skip_lvb; | |
1885 | ||
8bc674cb | 1886 | if (!dlm_lvb_is_empty(mres->lvb)) { |
6714d8e8 KH |
1887 | if (lksb->flags & DLM_LKSB_PUT_LVB) { |
1888 | /* other node was trying to update | |
1889 | * lvb when node died. recreate the | |
1890 | * lksb with the updated lvb. */ | |
1891 | memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); | |
ccd8b1f9 KH |
1892 | /* the lock resource lvb update must happen |
1893 | * NOW, before the spinlock is dropped. | |
1894 | * we no longer wait for the AST to update | |
1895 | * the lvb. */ | |
1896 | memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); | |
6714d8e8 | 1897 | } else { |
2bd63216 | 1898 | /* otherwise, the node is sending its |
6714d8e8 KH |
1899 | * most recent valid lvb info */ |
1900 | BUG_ON(ml->type != LKM_EXMODE && | |
1901 | ml->type != LKM_PRMODE); | |
8bc674cb | 1902 | if (!dlm_lvb_is_empty(res->lvb) && |
ccd8b1f9 KH |
1903 | (ml->type == LKM_EXMODE || |
1904 | memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { | |
1905 | int i; | |
1906 | mlog(ML_ERROR, "%s:%.*s: received bad " | |
1907 | "lvb! type=%d\n", dlm->name, | |
1908 | res->lockname.len, | |
1909 | res->lockname.name, ml->type); | |
1910 | printk("lockres lvb=["); | |
1911 | for (i=0; i<DLM_LVB_LEN; i++) | |
1912 | printk("%02x", res->lvb[i]); | |
1913 | printk("]\nmigrated lvb=["); | |
1914 | for (i=0; i<DLM_LVB_LEN; i++) | |
1915 | printk("%02x", mres->lvb[i]); | |
1916 | printk("]\n"); | |
1917 | dlm_print_one_lock_resource(res); | |
1918 | BUG(); | |
6714d8e8 KH |
1919 | } |
1920 | memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); | |
1921 | } | |
1922 | } | |
ccd8b1f9 | 1923 | skip_lvb: |
6714d8e8 KH |
1924 | |
1925 | /* NOTE: | |
1926 | * wrt lock queue ordering and recovery: | |
1927 | * 1. order of locks on granted queue is | |
1928 | * meaningless. | |
1929 | * 2. order of locks on converting queue is | |
1930 | * LOST with the node death. sorry charlie. | |
1931 | * 3. order of locks on the blocked queue is | |
1932 | * also LOST. | |
1933 | * order of locks does not affect integrity, it | |
1934 | * just means that a lock request may get pushed | |
1935 | * back in line as a result of the node death. | |
1936 | * also note that for a given node the lock order | |
1937 | * for its secondary queue locks is preserved | |
1938 | * relative to each other, but clearly *not* | |
1939 | * preserved relative to locks from other nodes. | |
1940 | */ | |
c3187ce5 | 1941 | bad = 0; |
6714d8e8 | 1942 | spin_lock(&res->spinlock); |
c3187ce5 KH |
1943 | list_for_each_entry(lock, queue, list) { |
1944 | if (lock->ml.cookie == ml->cookie) { | |
26636bf6 | 1945 | c = lock->ml.cookie; |
c3187ce5 KH |
1946 | mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " |
1947 | "exists on this lockres!\n", dlm->name, | |
1948 | res->lockname.len, res->lockname.name, | |
74aa2585 KH |
1949 | dlm_get_lock_cookie_node(be64_to_cpu(c)), |
1950 | dlm_get_lock_cookie_seq(be64_to_cpu(c))); | |
c3187ce5 KH |
1951 | |
1952 | mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " | |
1953 | "node=%u, cookie=%u:%llu, queue=%d\n", | |
1954 | ml->type, ml->convert_type, ml->node, | |
74aa2585 KH |
1955 | dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), |
1956 | dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), | |
c3187ce5 KH |
1957 | ml->list); |
1958 | ||
1959 | __dlm_print_one_lock_resource(res); | |
1960 | bad = 1; | |
1961 | break; | |
1962 | } | |
1963 | } | |
1964 | if (!bad) { | |
1965 | dlm_lock_get(newlock); | |
1966 | list_add_tail(&newlock->list, queue); | |
ba2bf218 KH |
1967 | mlog(0, "%s:%.*s: added lock for node %u, " |
1968 | "setting refmap bit\n", dlm->name, | |
1969 | res->lockname.len, res->lockname.name, ml->node); | |
8d400b81 | 1970 | dlm_lockres_set_refmap_bit(dlm, res, ml->node); |
ba2bf218 | 1971 | added++; |
c3187ce5 | 1972 | } |
6714d8e8 KH |
1973 | spin_unlock(&res->spinlock); |
1974 | } | |
1975 | mlog(0, "done running all the locks\n"); | |
1976 | ||
1977 | leave: | |
ba2bf218 | 1978 | /* balance the ref taken when the work was queued */ |
50635f15 KH |
1979 | spin_lock(&res->spinlock); |
1980 | dlm_lockres_drop_inflight_ref(dlm, res); | |
1981 | spin_unlock(&res->spinlock); | |
ba2bf218 | 1982 | |
6714d8e8 KH |
1983 | if (ret < 0) { |
1984 | mlog_errno(ret); | |
1985 | if (newlock) | |
1986 | dlm_lock_put(newlock); | |
1987 | } | |
1988 | ||
6714d8e8 KH |
1989 | return ret; |
1990 | } | |
1991 | ||
1992 | void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, | |
1993 | struct dlm_lock_resource *res) | |
1994 | { | |
1995 | int i; | |
800deef3 CH |
1996 | struct list_head *queue; |
1997 | struct dlm_lock *lock, *next; | |
6714d8e8 | 1998 | |
a524812b WW |
1999 | assert_spin_locked(&dlm->spinlock); |
2000 | assert_spin_locked(&res->spinlock); | |
6714d8e8 | 2001 | res->state |= DLM_LOCK_RES_RECOVERING; |
69d72b06 KH |
2002 | if (!list_empty(&res->recovering)) { |
2003 | mlog(0, | |
2004 | "Recovering res %s:%.*s is already on recovery list!\n", | |
2005 | dlm->name, res->lockname.len, res->lockname.name); | |
6714d8e8 | 2006 | list_del_init(&res->recovering); |
52987e2a | 2007 | dlm_lockres_put(res); |
69d72b06 KH |
2008 | } |
2009 | /* We need to hold a reference while on the recovery list */ | |
2010 | dlm_lockres_get(res); | |
6714d8e8 KH |
2011 | list_add_tail(&res->recovering, &dlm->reco.resources); |
2012 | ||
2013 | /* find any pending locks and put them back on proper list */ | |
2014 | for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { | |
2015 | queue = dlm_list_idx_to_ptr(res, i); | |
800deef3 | 2016 | list_for_each_entry_safe(lock, next, queue, list) { |
6714d8e8 KH |
2017 | dlm_lock_get(lock); |
2018 | if (lock->convert_pending) { | |
2019 | /* move converting lock back to granted */ | |
2020 | BUG_ON(i != DLM_CONVERTING_LIST); | |
2021 | mlog(0, "node died with convert pending " | |
2022 | "on %.*s. move back to granted list.\n", | |
2023 | res->lockname.len, res->lockname.name); | |
2024 | dlm_revert_pending_convert(res, lock); | |
2025 | lock->convert_pending = 0; | |
2026 | } else if (lock->lock_pending) { | |
2027 | /* remove pending lock requests completely */ | |
2028 | BUG_ON(i != DLM_BLOCKED_LIST); | |
2029 | mlog(0, "node died with lock pending " | |
2030 | "on %.*s. remove from blocked list and skip.\n", | |
2031 | res->lockname.len, res->lockname.name); | |
2032 | /* lock will be floating until ref in | |
2033 | * dlmlock_remote is freed after the network | |
2034 | * call returns. ok for it to not be on any | |
2035 | * list since no ast can be called | |
2036 | * (the master is dead). */ | |
2037 | dlm_revert_pending_lock(res, lock); | |
2038 | lock->lock_pending = 0; | |
2039 | } else if (lock->unlock_pending) { | |
2040 | /* if an unlock was in progress, treat as | |
2041 | * if this had completed successfully | |
2042 | * before sending this lock state to the | |
2043 | * new master. note that the dlm_unlock | |
2044 | * call is still responsible for calling | |
2045 | * the unlockast. that will happen after | |
2046 | * the network call times out. for now, | |
2047 | * just move lists to prepare the new | |
2048 | * recovery master. */ | |
2049 | BUG_ON(i != DLM_GRANTED_LIST); | |
2050 | mlog(0, "node died with unlock pending " | |
2051 | "on %.*s. remove from granted list and skip.\n", | |
2052 | res->lockname.len, res->lockname.name); | |
2053 | dlm_commit_pending_unlock(res, lock); | |
2054 | lock->unlock_pending = 0; | |
2055 | } else if (lock->cancel_pending) { | |
2056 | /* if a cancel was in progress, treat as | |
2057 | * if this had completed successfully | |
2058 | * before sending this lock state to the | |
2059 | * new master */ | |
2060 | BUG_ON(i != DLM_CONVERTING_LIST); | |
2061 | mlog(0, "node died with cancel pending " | |
2062 | "on %.*s. move back to granted list.\n", | |
2063 | res->lockname.len, res->lockname.name); | |
2064 | dlm_commit_pending_cancel(res, lock); | |
2065 | lock->cancel_pending = 0; | |
2066 | } | |
2067 | dlm_lock_put(lock); | |
2068 | } | |
2069 | } | |
2070 | } | |
2071 | ||
2072 | ||
2073 | ||
2074 | /* removes all recovered locks from the recovery list. | |
2075 | * sets the res->owner to the new master. | |
2076 | * unsets the RECOVERY flag and wakes waiters. */ | |
2077 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | |
2078 | u8 dead_node, u8 new_master) | |
2079 | { | |
2080 | int i; | |
81f2094a | 2081 | struct hlist_head *bucket; |
800deef3 | 2082 | struct dlm_lock_resource *res, *next; |
6714d8e8 | 2083 | |
6714d8e8 KH |
2084 | assert_spin_locked(&dlm->spinlock); |
2085 | ||
800deef3 | 2086 | list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { |
6714d8e8 | 2087 | if (res->owner == dead_node) { |
0afbba13 SM |
2088 | mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", |
2089 | dlm->name, res->lockname.len, res->lockname.name, | |
2090 | res->owner, new_master); | |
6714d8e8 KH |
2091 | list_del_init(&res->recovering); |
2092 | spin_lock(&res->spinlock); | |
ba2bf218 KH |
2093 | /* new_master has our reference from |
2094 | * the lock state sent during recovery */ | |
6714d8e8 KH |
2095 | dlm_change_lockres_owner(dlm, res, new_master); |
2096 | res->state &= ~DLM_LOCK_RES_RECOVERING; | |
ba2bf218 | 2097 | if (__dlm_lockres_has_locks(res)) |
69d72b06 | 2098 | __dlm_dirty_lockres(dlm, res); |
6714d8e8 KH |
2099 | spin_unlock(&res->spinlock); |
2100 | wake_up(&res->wq); | |
69d72b06 | 2101 | dlm_lockres_put(res); |
6714d8e8 KH |
2102 | } |
2103 | } | |
2104 | ||
2105 | /* this will become unnecessary eventually, but | |
2106 | * for now we need to run the whole hash, clear | |
2107 | * the RECOVERING state and set the owner | |
2108 | * if necessary */ | |
81f2094a | 2109 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
03d864c0 | 2110 | bucket = dlm_lockres_hash(dlm, i); |
b67bfe0d | 2111 | hlist_for_each_entry(res, bucket, hash_node) { |
0afbba13 SM |
2112 | if (!(res->state & DLM_LOCK_RES_RECOVERING)) |
2113 | continue; | |
2114 | ||
2115 | if (res->owner != dead_node && | |
2116 | res->owner != dlm->node_num) | |
2117 | continue; | |
2118 | ||
2119 | if (!list_empty(&res->recovering)) { | |
2120 | list_del_init(&res->recovering); | |
2121 | dlm_lockres_put(res); | |
6714d8e8 | 2122 | } |
0afbba13 SM |
2123 | |
2124 | /* new_master has our reference from | |
2125 | * the lock state sent during recovery */ | |
2126 | mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", | |
2127 | dlm->name, res->lockname.len, res->lockname.name, | |
2128 | res->owner, new_master); | |
2129 | spin_lock(&res->spinlock); | |
2130 | dlm_change_lockres_owner(dlm, res, new_master); | |
2131 | res->state &= ~DLM_LOCK_RES_RECOVERING; | |
2132 | if (__dlm_lockres_has_locks(res)) | |
2133 | __dlm_dirty_lockres(dlm, res); | |
2134 | spin_unlock(&res->spinlock); | |
2135 | wake_up(&res->wq); | |
6714d8e8 KH |
2136 | } |
2137 | } | |
2138 | } | |
2139 | ||
2140 | static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) | |
2141 | { | |
2142 | if (local) { | |
2143 | if (lock->ml.type != LKM_EXMODE && | |
2144 | lock->ml.type != LKM_PRMODE) | |
2145 | return 1; | |
2146 | } else if (lock->ml.type == LKM_EXMODE) | |
2147 | return 1; | |
2148 | return 0; | |
2149 | } | |
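/* In dlm_revalidate_lvb() below this means: if this node is the master,
 * blank the lvb when the dead node held EX (it may have written a newer
 * value that never reached us); if this node only holds a secondary copy,
 * blank it unless one of our own locks is at PR or EX, since anything
 * weaker gives no guarantee the cached lvb is still current. */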
2150 | ||
2151 | static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, | |
2152 | struct dlm_lock_resource *res, u8 dead_node) | |
2153 | { | |
800deef3 | 2154 | struct list_head *queue; |
6714d8e8 KH |
2155 | struct dlm_lock *lock; |
2156 | int blank_lvb = 0, local = 0; | |
2157 | int i; | |
2158 | u8 search_node; | |
2159 | ||
2160 | assert_spin_locked(&dlm->spinlock); | |
2161 | assert_spin_locked(&res->spinlock); | |
2162 | ||
2163 | if (res->owner == dlm->node_num) | |
2bd63216 | 2164 | /* if this node owned the lockres, and if the dead node |
6714d8e8 KH |
2165 | * had an EX when he died, blank out the lvb */ |
2166 | search_node = dead_node; | |
2167 | else { | |
2168 | /* if this is a secondary lockres, and we had no EX or PR | |
2169 | * locks granted, we can no longer trust the lvb */ | |
2170 | search_node = dlm->node_num; | |
2171 | local = 1; /* check local state for valid lvb */ | |
2172 | } | |
2173 | ||
2174 | for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { | |
2175 | queue = dlm_list_idx_to_ptr(res, i); | |
800deef3 | 2176 | list_for_each_entry(lock, queue, list) { |
6714d8e8 KH |
2177 | if (lock->ml.node == search_node) { |
2178 | if (dlm_lvb_needs_invalidation(lock, local)) { | |
2179 | /* zero the lksb lvb and lockres lvb */ | |
2180 | blank_lvb = 1; | |
2181 | memset(lock->lksb->lvb, 0, DLM_LVB_LEN); | |
2182 | } | |
2183 | } | |
2184 | } | |
2185 | } | |
2186 | ||
2187 | if (blank_lvb) { | |
2188 | mlog(0, "clearing %.*s lvb, dead node %u had EX\n", | |
2189 | res->lockname.len, res->lockname.name, dead_node); | |
2190 | memset(res->lvb, 0, DLM_LVB_LEN); | |
2191 | } | |
2192 | } | |
2193 | ||
2194 | static void dlm_free_dead_locks(struct dlm_ctxt *dlm, | |
2195 | struct dlm_lock_resource *res, u8 dead_node) | |
2196 | { | |
800deef3 | 2197 | struct dlm_lock *lock, *next; |
ba2bf218 | 2198 | unsigned int freed = 0; |
6714d8e8 KH |
2199 | |
2200 | /* this node is the lockres master: | |
2201 | * 1) remove any stale locks for the dead node | |
2bd63216 | 2202 | * 2) if the dead node had an EX when he died, blank out the lvb |
6714d8e8 KH |
2203 | */ |
2204 | assert_spin_locked(&dlm->spinlock); | |
2205 | assert_spin_locked(&res->spinlock); | |
2206 | ||
2c5c54ac SM |
2207 | /* We do two dlm_lock_put(). One for removing from list and the other is |
2208 | * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ | |
2209 | ||
6714d8e8 | 2210 | /* TODO: check pending_asts, pending_basts here */ |
800deef3 | 2211 | list_for_each_entry_safe(lock, next, &res->granted, list) { |
6714d8e8 KH |
2212 | if (lock->ml.node == dead_node) { |
2213 | list_del_init(&lock->list); | |
2214 | dlm_lock_put(lock); | |
2c5c54ac SM |
2215 | /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ |
2216 | dlm_lock_put(lock); | |
ba2bf218 | 2217 | freed++; |
6714d8e8 KH |
2218 | } |
2219 | } | |
800deef3 | 2220 | list_for_each_entry_safe(lock, next, &res->converting, list) { |
6714d8e8 KH |
2221 | if (lock->ml.node == dead_node) { |
2222 | list_del_init(&lock->list); | |
2223 | dlm_lock_put(lock); | |
2c5c54ac SM |
2224 | /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ |
2225 | dlm_lock_put(lock); | |
ba2bf218 | 2226 | freed++; |
6714d8e8 KH |
2227 | } |
2228 | } | |
800deef3 | 2229 | list_for_each_entry_safe(lock, next, &res->blocked, list) { |
6714d8e8 KH |
2230 | if (lock->ml.node == dead_node) { |
2231 | list_del_init(&lock->list); | |
2232 | dlm_lock_put(lock); | |
2c5c54ac SM |
2233 | /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ |
2234 | dlm_lock_put(lock); | |
ba2bf218 | 2235 | freed++; |
6714d8e8 KH |
2236 | } |
2237 | } | |
2238 | ||
ba2bf218 KH |
2239 | if (freed) { |
2240 | mlog(0, "%s:%.*s: freed %u locks for dead node %u, " | |
2241 | "dropping ref from lockres\n", dlm->name, | |
2242 | res->lockname.len, res->lockname.name, freed, dead_node); | |
cda70ba8 SM |
2243 | if (!test_bit(dead_node, res->refmap)) { | |
2244 | mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " | |
2245 | "but ref was not set\n", dlm->name, | |
2246 | res->lockname.len, res->lockname.name, freed, dead_node); | |
2247 | __dlm_print_one_lock_resource(res); | |
2248 | } | |
8d400b81 | 2249 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); |
ba2bf218 KH |
2250 | } else if (test_bit(dead_node, res->refmap)) { |
2251 | mlog(0, "%s:%.*s: dead node %u had a ref, but had " | |
2252 | "no locks and had not purged before dying\n", dlm->name, | |
2253 | res->lockname.len, res->lockname.name, dead_node); | |
8d400b81 | 2254 | dlm_lockres_clear_refmap_bit(dlm, res, dead_node); |
ba2bf218 KH |
2255 | } |
2256 | ||
6714d8e8 KH |
2257 | /* do not kick thread yet */ |
2258 | __dlm_dirty_lockres(dlm, res); | |
2259 | } | |
2260 | ||
2261 | /* if this node is the recovery master, and there are no | |
2262 | * locks for a given lockres owned by this node that are in | |
2263 | * either PR or EX mode, zero out the lvb before requesting. | |
2264 | * | |
2265 | */ | |
2266 | ||
2267 | ||
2268 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | |
2269 | { | |
6714d8e8 KH |
2270 | struct dlm_lock_resource *res; |
2271 | int i; | |
81f2094a | 2272 | struct hlist_head *bucket; |
e2faea4c | 2273 | struct dlm_lock *lock; |
6714d8e8 KH |
2274 | |
2275 | ||
2276 | /* purge any stale mles */ | |
2277 | dlm_clean_master_list(dlm, dead_node); | |
2278 | ||
2279 | /* | |
2280 | * now clean up all lock resources. there are two rules: | |
2281 | * | |
2282 | * 1) if the dead node was the master, move the lockres | |
2283 | * to the recovering list. set the RECOVERING flag. | |
2284 | * this lockres needs to be cleaned up before it can | |
2285 | * be used further. | |
2286 | * | |
2287 | * 2) if this node was the master, remove all locks from | |
2288 | * each of the lockres queues that were owned by the | |
2289 | * dead node. once recovery finishes, the dlm thread | |
2290 | * can be kicked again to see if any ASTs or BASTs | |
2291 | * need to be fired as a result. | |
2292 | */ | |
81f2094a | 2293 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
03d864c0 | 2294 | bucket = dlm_lockres_hash(dlm, i); |
b67bfe0d | 2295 | hlist_for_each_entry(res, bucket, hash_node) { |
e2faea4c KH |
2296 | /* always prune any $RECOVERY entries for dead nodes, |
2297 | * otherwise hangs can occur during later recovery */ | |
6714d8e8 | 2298 | if (dlm_is_recovery_lock(res->lockname.name, |
e2faea4c KH |
2299 | res->lockname.len)) { |
2300 | spin_lock(&res->spinlock); | |
2301 | list_for_each_entry(lock, &res->granted, list) { | |
2302 | if (lock->ml.node == dead_node) { | |
2303 | mlog(0, "AHA! there was " | |
2304 | "a $RECOVERY lock for dead " | |
2305 | "node %u (%s)!\n", | |
2306 | dead_node, dlm->name); | |
2307 | list_del_init(&lock->list); | |
2308 | dlm_lock_put(lock); | |
2309 | break; | |
2310 | } | |
2311 | } | |
2312 | spin_unlock(&res->spinlock); | |
6714d8e8 | 2313 | continue; |
2bd63216 | 2314 | } |
6714d8e8 KH |
2315 | spin_lock(&res->spinlock); |
2316 | /* zero the lvb if necessary */ | |
2317 | dlm_revalidate_lvb(dlm, res, dead_node); | |
ba2bf218 | 2318 | if (res->owner == dead_node) { |
a524812b | 2319 | if (res->state & DLM_LOCK_RES_DROPPING_REF) { |
8decab3c | 2320 | mlog(ML_NOTICE, "%s: res %.*s, Skip " |
a524812b | 2321 | "recovery as it is being freed\n", |
8decab3c | 2322 | dlm->name, res->lockname.len, |
a524812b WW |
2323 | res->lockname.name); |
2324 | } else | |
2325 | dlm_move_lockres_to_recovery_list(dlm, | |
2326 | res); | |
ba2bf218 | 2327 | |
ba2bf218 | 2328 | } else if (res->owner == dlm->node_num) { |
6714d8e8 KH |
2329 | dlm_free_dead_locks(dlm, res, dead_node); |
2330 | __dlm_lockres_calc_usage(dlm, res); | |
2331 | } | |
2332 | spin_unlock(&res->spinlock); | |
2333 | } | |
2334 | } | |
2335 | ||
2336 | } | |
2337 | ||
2338 | static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) | |
2339 | { | |
2340 | assert_spin_locked(&dlm->spinlock); | |
2341 | ||
466d1a45 KH |
2342 | if (dlm->reco.new_master == idx) { |
2343 | mlog(0, "%s: recovery master %d just died\n", | |
2344 | dlm->name, idx); | |
2345 | if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { | |
2346 | /* finalize1 was reached, so it is safe to clear | |
2347 | * the new_master and dead_node. that recovery | |
2348 | * is complete. */ | |
2349 | mlog(0, "%s: dead master %d had reached " | |
2350 | "finalize1 state, clearing\n", dlm->name, idx); | |
2351 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; | |
2352 | __dlm_reset_recovery(dlm); | |
2353 | } | |
2354 | } | |
2355 | ||
2d4b1cbb TM |
2356 | /* Clean up join state on node death. */ |
2357 | if (dlm->joining_node == idx) { | |
2358 | mlog(0, "Clearing join state for node %u\n", idx); | |
2359 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | |
2360 | } | |
2361 | ||
6714d8e8 KH |
2362 | /* check to see if the node is already considered dead */ |
2363 | if (!test_bit(idx, dlm->live_nodes_map)) { | |
2364 | mlog(0, "for domain %s, node %d is already dead. " | |
2365 | "another node likely did recovery already.\n", | |
2366 | dlm->name, idx); | |
2367 | return; | |
2368 | } | |
2369 | ||
2370 | /* check to see if we do not care about this node */ | |
2371 | if (!test_bit(idx, dlm->domain_map)) { | |
2372 | /* This also catches the case that we get a node down | |
2373 | * but haven't joined the domain yet. */ | |
2374 | mlog(0, "node %u already removed from domain!\n", idx); | |
2375 | return; | |
2376 | } | |
2377 | ||
2378 | clear_bit(idx, dlm->live_nodes_map); | |
2379 | ||
6714d8e8 KH |
2380 | /* make sure local cleanup occurs before the heartbeat events */ |
2381 | if (!test_bit(idx, dlm->recovery_map)) | |
2382 | dlm_do_local_recovery_cleanup(dlm, idx); | |
2383 | ||
2384 | /* notify anything attached to the heartbeat events */ | |
2385 | dlm_hb_event_notify_attached(dlm, idx, 0); | |
2386 | ||
2387 | mlog(0, "node %u being removed from domain map!\n", idx); | |
2388 | clear_bit(idx, dlm->domain_map); | |
bddefdee | 2389 | clear_bit(idx, dlm->exit_domain_map); |
6714d8e8 KH |
2390 | /* wake up migration waiters if a node goes down. |
2391 | * perhaps later we can genericize this for other waiters. */ | |
2392 | wake_up(&dlm->migration_wq); | |
2393 | ||
2394 | if (test_bit(idx, dlm->recovery_map)) | |
2395 | mlog(0, "domain %s, node %u already added " | |
2396 | "to recovery map!\n", dlm->name, idx); | |
2397 | else | |
2398 | set_bit(idx, dlm->recovery_map); | |
2399 | } | |
2400 | ||
2401 | void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) | |
2402 | { | |
2403 | struct dlm_ctxt *dlm = data; | |
2404 | ||
2405 | if (!dlm_grab(dlm)) | |
2406 | return; | |
2407 | ||
6561168c MF |
2408 | /* |
2409 | * This will notify any dlm users that a node in our domain | |
2410 | * went away without notifying us first. | |
2411 | */ | |
2412 | if (test_bit(idx, dlm->domain_map)) | |
2413 | dlm_fire_domain_eviction_callbacks(dlm, idx); | |
2414 | ||
6714d8e8 KH |
2415 | spin_lock(&dlm->spinlock); |
2416 | __dlm_hb_node_down(dlm, idx); | |
2417 | spin_unlock(&dlm->spinlock); | |
2418 | ||
2419 | dlm_put(dlm); | |
2420 | } | |
2421 | ||
2422 | void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) | |
2423 | { | |
2424 | struct dlm_ctxt *dlm = data; | |
2425 | ||
2426 | if (!dlm_grab(dlm)) | |
2427 | return; | |
2428 | ||
2429 | spin_lock(&dlm->spinlock); | |
6714d8e8 | 2430 | set_bit(idx, dlm->live_nodes_map); |
e2faea4c KH |
2431 | /* do NOT notify mle attached to the heartbeat events. |
2432 | * new nodes are not interested in mastery until joined. */ | |
6714d8e8 KH |
2433 | spin_unlock(&dlm->spinlock); |
2434 | ||
2435 | dlm_put(dlm); | |
2436 | } | |
2437 | ||
2438 | static void dlm_reco_ast(void *astdata) | |
2439 | { | |
2440 | struct dlm_ctxt *dlm = astdata; | |
2441 | mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", | |
2442 | dlm->node_num, dlm->name); | |
2443 | } | |
2444 | static void dlm_reco_bast(void *astdata, int blocked_type) | |
2445 | { | |
2446 | struct dlm_ctxt *dlm = astdata; | |
2447 | mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", | |
2448 | dlm->node_num, dlm->name); | |
2449 | } | |
2450 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) | |
2451 | { | |
2452 | mlog(0, "unlockast for recovery lock fired!\n"); | |
2453 | } | |
2454 | ||
e2faea4c KH |
2455 | /* |
2456 | * dlm_pick_recovery_master will continually attempt to use | |
2457 | * dlmlock() on the special "$RECOVERY" lockres with the | |
2458 | * LKM_NOQUEUE flag to get an EX. every thread that enters | |
2459 | * this function on each node racing to become the recovery | |
2460 | * master will not stop attempting this until either: | |
2461 | * a) this node gets the EX (and becomes the recovery master), | |
2bd63216 | 2462 | * or b) dlm->reco.new_master gets set to some nodenum |
e2faea4c KH |
2463 | * != O2NM_INVALID_NODE_NUM (another node will do the reco). |
2464 | * so each time a recovery master is needed, the entire cluster | |
2465 | * will sync at this point. if the new master dies, that will | |
2466 | * be detected in dlm_do_recovery */ | |
6714d8e8 KH |
2467 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) |
2468 | { | |
2469 | enum dlm_status ret; | |
2470 | struct dlm_lockstatus lksb; | |
2471 | int status = -EINVAL; | |
2472 | ||
2473 | mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", | |
2474 | dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); | |
2bd63216 | 2475 | again: |
6714d8e8 KH |
2476 | memset(&lksb, 0, sizeof(lksb)); |
2477 | ||
2478 | ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, | |
3384f3df MF |
2479 | DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, |
2480 | dlm_reco_ast, dlm, dlm_reco_bast); | |
6714d8e8 | 2481 | |
e2faea4c KH |
2482 | mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", |
2483 | dlm->name, ret, lksb.status); | |
2484 | ||
6714d8e8 KH |
2485 | if (ret == DLM_NORMAL) { |
2486 | mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", | |
2487 | dlm->name, dlm->node_num); | |
2bd63216 SM |
2488 | |
2489 | /* got the EX lock. check to see if another node | |
e2faea4c KH |
2490 | * just became the reco master */ |
2491 | if (dlm_reco_master_ready(dlm)) { | |
2492 | mlog(0, "%s: got reco EX lock, but %u will " | |
2493 | "do the recovery\n", dlm->name, | |
2494 | dlm->reco.new_master); | |
2495 | status = -EEXIST; | |
2496 | } else { | |
898effac KH |
2497 | status = 0; |
2498 | ||
2499 | /* see if recovery was already finished elsewhere */ | |
2500 | spin_lock(&dlm->spinlock); | |
2501 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | |
2bd63216 | 2502 | status = -EINVAL; |
898effac KH |
2503 | mlog(0, "%s: got reco EX lock, but " |
2504 | "node got recovered already\n", dlm->name); | |
2505 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { | |
2506 | mlog(ML_ERROR, "%s: new master is %u " | |
2bd63216 | 2507 | "but no dead node!\n", |
898effac KH |
2508 | dlm->name, dlm->reco.new_master); |
2509 | BUG(); | |
2510 | } | |
2511 | } | |
2512 | spin_unlock(&dlm->spinlock); | |
2513 | } | |
2514 | ||
2515 | /* if this node has actually become the recovery master, | |
2516 | * set the master and send the messages to begin recovery */ | |
2517 | if (!status) { | |
2518 | mlog(0, "%s: dead=%u, this=%u, sending " | |
2bd63216 | 2519 | "begin_reco now\n", dlm->name, |
898effac | 2520 | dlm->reco.dead_node, dlm->node_num); |
e2faea4c KH |
2521 | status = dlm_send_begin_reco_message(dlm, |
2522 | dlm->reco.dead_node); | |
2523 | /* this always succeeds */ | |
2524 | BUG_ON(status); | |
2525 | ||
2526 | /* set the new_master to this node */ | |
2527 | spin_lock(&dlm->spinlock); | |
ab27eb6f | 2528 | dlm_set_reco_master(dlm, dlm->node_num); |
e2faea4c KH |
2529 | spin_unlock(&dlm->spinlock); |
2530 | } | |
6714d8e8 KH |
2531 | |
2532 | /* recovery lock is a special case. ast will not get fired, | |
2533 | * so just go ahead and unlock it. */ | |
2534 | ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); | |
e2faea4c KH |
2535 | if (ret == DLM_DENIED) { |
2536 | mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); | |
2537 | ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); | |
2538 | } | |
6714d8e8 KH |
2539 | if (ret != DLM_NORMAL) { |
2540 | /* this would really suck. this could only happen | |
2541 | * if there was a network error during the unlock | |
2542 | * because of node death. this means the unlock | |
2543 | * is actually "done" and the lock structure is | |
2544 | * even freed. we can continue, but only | |
2545 | * because this specific lock name is special. */ | |
e2faea4c | 2546 | mlog(ML_ERROR, "dlmunlock returned %d\n", ret); |
6714d8e8 KH |
2547 | } |
2548 | } else if (ret == DLM_NOTQUEUED) { | |
2549 | mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", | |
2550 | dlm->name, dlm->node_num); | |
2551 | /* another node is master. wait on | |
2bd63216 | 2552 | * reco.new_master != O2NM_INVALID_NODE_NUM |
e2faea4c KH |
2553 | * for at most one second */ |
2554 | wait_event_timeout(dlm->dlm_reco_thread_wq, | |
2555 | dlm_reco_master_ready(dlm), | |
2556 | msecs_to_jiffies(1000)); | |
2557 | if (!dlm_reco_master_ready(dlm)) { | |
2558 | mlog(0, "%s: reco master taking a while\n", | |
2559 | dlm->name); | |
2560 | goto again; | |
2561 | } | |
2562 | /* another node has informed this one that it is reco master */ | |
2563 | mlog(0, "%s: reco master %u is ready to recover %u\n", | |
2564 | dlm->name, dlm->reco.new_master, dlm->reco.dead_node); | |
6714d8e8 | 2565 | status = -EEXIST; |
c8df412e KH |
2566 | } else if (ret == DLM_RECOVERING) { |
2567 | mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", | |
2568 | dlm->name, dlm->node_num); | |
2569 | goto again; | |
e2faea4c KH |
2570 | } else { |
2571 | struct dlm_lock_resource *res; | |
2572 | ||
2573 | /* dlmlock returned something other than NOTQUEUED or NORMAL */ | |
2574 | mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " | |
2575 | "lksb.status=%s\n", dlm->name, dlm_errname(ret), | |
2576 | dlm_errname(lksb.status)); | |
2577 | res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, | |
2578 | DLM_RECOVERY_LOCK_NAME_LEN); | |
2579 | if (res) { | |
2580 | dlm_print_one_lock_resource(res); | |
2581 | dlm_lockres_put(res); | |
2582 | } else { | |
2583 | mlog(ML_ERROR, "recovery lock not found\n"); | |
2584 | } | |
2585 | BUG(); | |
6714d8e8 KH |
2586 | } |
2587 | ||
2588 | return status; | |
2589 | } | |
2590 | ||
2591 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) | |
2592 | { | |
2593 | struct dlm_begin_reco br; | |
2594 | int ret = 0; | |
2595 | struct dlm_node_iter iter; | |
2596 | int nodenum; | |
2597 | int status; | |
2598 | ||
d6dea6e9 | 2599 | mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); |
6714d8e8 KH |
2600 | |
2601 | spin_lock(&dlm->spinlock); | |
2602 | dlm_node_iter_init(dlm->domain_map, &iter); | |
2603 | spin_unlock(&dlm->spinlock); | |
2604 | ||
2605 | clear_bit(dead_node, iter.node_map); | |
2606 | ||
2607 | memset(&br, 0, sizeof(br)); | |
2608 | br.node_idx = dlm->node_num; | |
2609 | br.dead_node = dead_node; | |
2610 | ||
2611 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | |
2612 | ret = 0; | |
2613 | if (nodenum == dead_node) { | |
2614 | mlog(0, "not sending begin reco to dead node " | |
2615 | "%u\n", dead_node); | |
2616 | continue; | |
2617 | } | |
2618 | if (nodenum == dlm->node_num) { | |
2619 | mlog(0, "not sending begin reco to self\n"); | |
2620 | continue; | |
2621 | } | |
e2faea4c | 2622 | retry: |
6714d8e8 KH |
2623 | ret = -EINVAL; |
2624 | mlog(0, "attempting to send begin reco msg to %d\n", | |
2625 | nodenum); | |
2626 | ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, | |
2627 | &br, sizeof(br), nodenum, &status); | |
2628 | /* negative status is handled ok by caller here */ | |
2629 | if (ret >= 0) | |
2630 | ret = status; | |
e2faea4c KH |
2631 | if (dlm_is_host_down(ret)) { |
2632 | /* node is down. not involved in recovery | |
2633 | * so just keep going */ | |
a5196ec5 | 2634 | mlog(ML_NOTICE, "%s: node %u was down when sending " |
e2faea4c KH |
2635 | "begin reco msg (%d)\n", dlm->name, nodenum, ret); |
2636 | ret = 0; | |
2637 | } | |
cd34edd8 SM |
2638 | |
2639 | /* | |
2640 | * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, | |
2641 | * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. | |
2642 | * We are handling both for compatibility reasons. | |
2643 | */ | |
2644 | if (ret == -EAGAIN || ret == EAGAIN) { | |
aad1b153 TY |
2645 | mlog(0, "%s: trying to start recovery of node " |
2646 | "%u, but node %u is waiting for last recovery " | |
2647 | "to complete, backoff for a bit\n", dlm->name, | |
2648 | dead_node, nodenum); | |
2649 | msleep(100); | |
2650 | goto retry; | |
2651 | } | |
6714d8e8 KH |
2652 | if (ret < 0) { |
2653 | struct dlm_lock_resource *res; | |
a5196ec5 | 2654 | |
2bd63216 | 2655 | /* this is now a serious problem, possibly ENOMEM |
e2faea4c | 2656 | * in the network stack. must retry */ |
6714d8e8 KH |
2657 | mlog_errno(ret); |
2658 | mlog(ML_ERROR, "begin reco of dlm %s to node %u " | |
a5196ec5 | 2659 | "returned %d\n", dlm->name, nodenum, ret); |
6714d8e8 KH |
2660 | res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, |
2661 | DLM_RECOVERY_LOCK_NAME_LEN); | |
2662 | if (res) { | |
2663 | dlm_print_one_lock_resource(res); | |
2664 | dlm_lockres_put(res); | |
2665 | } else { | |
2666 | mlog(ML_ERROR, "recovery lock not found\n"); | |
2667 | } | |
2bd63216 | 2668 | /* sleep for a bit in hopes that we can avoid |
e2faea4c KH |
2669 | * another ENOMEM */ |
2670 | msleep(100); | |
2671 | goto retry; | |
6714d8e8 KH |
2672 | } |
2673 | } | |
2674 | ||
2675 | return ret; | |
2676 | } | |
2677 | ||
d74c9803 KH |
2678 | int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, |
2679 | void **ret_data) | |
6714d8e8 KH |
2680 | { |
2681 | struct dlm_ctxt *dlm = data; | |
2682 | struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; | |
2683 | ||
2684 | /* ok to return 0, domain has gone away */ | |
2685 | if (!dlm_grab(dlm)) | |
2686 | return 0; | |
2687 | ||
466d1a45 KH |
2688 | spin_lock(&dlm->spinlock); |
2689 | if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { | |
2690 | mlog(0, "%s: node %u wants to recover node %u (%u:%u) " | |
2691 | "but this node is in finalize state, waiting on finalize2\n", | |
2692 | dlm->name, br->node_idx, br->dead_node, | |
2693 | dlm->reco.dead_node, dlm->reco.new_master); | |
2694 | spin_unlock(&dlm->spinlock); | |
aad1b153 | 2695 | return -EAGAIN; |
466d1a45 KH |
2696 | } |
2697 | spin_unlock(&dlm->spinlock); | |
2698 | ||
d6dea6e9 KH |
2699 | mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", |
2700 | dlm->name, br->node_idx, br->dead_node, | |
2701 | dlm->reco.dead_node, dlm->reco.new_master); | |
6714d8e8 KH |
2702 | |
2703 | dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); | |
2704 | ||
2705 | spin_lock(&dlm->spinlock); | |
2706 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { | |
e2faea4c KH |
2707 | if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { |
2708 | mlog(0, "%s: new_master %u died, changing " | |
2709 | "to %u\n", dlm->name, dlm->reco.new_master, | |
2710 | br->node_idx); | |
2711 | } else { | |
2712 | mlog(0, "%s: new_master %u NOT DEAD, changing " | |
2713 | "to %u\n", dlm->name, dlm->reco.new_master, | |
2714 | br->node_idx); | |
2715 | /* may not have seen the new master as dead yet */ | |
2716 | } | |
6714d8e8 KH |
2717 | } |
2718 | if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { | |
e2faea4c | 2719 | mlog(ML_NOTICE, "%s: dead_node previously set to %u, " |
2bd63216 | 2720 | "node %u changing it to %u\n", dlm->name, |
e2faea4c | 2721 | dlm->reco.dead_node, br->node_idx, br->dead_node); |
6714d8e8 | 2722 | } |
ab27eb6f KH |
2723 | dlm_set_reco_master(dlm, br->node_idx); |
2724 | dlm_set_reco_dead_node(dlm, br->dead_node); | |
6714d8e8 | 2725 | if (!test_bit(br->dead_node, dlm->recovery_map)) { |
e2faea4c | 2726 | mlog(0, "recovery master %u sees %u as dead, but this " |
6714d8e8 KH |
2727 | "node has not yet. marking %u as dead\n", |
2728 | br->node_idx, br->dead_node, br->dead_node); | |
e2faea4c KH |
2729 | if (!test_bit(br->dead_node, dlm->domain_map) || |
2730 | !test_bit(br->dead_node, dlm->live_nodes_map)) | |
2731 | mlog(0, "%u not in domain/live_nodes map " | |
2732 | "so setting it in reco map manually\n", | |
2733 | br->dead_node); | |
c03872f5 KH |
2734 | /* force the recovery cleanup in __dlm_hb_node_down |
2735 | * both of these will be cleared in a moment */ | |
2736 | set_bit(br->dead_node, dlm->domain_map); | |
2737 | set_bit(br->dead_node, dlm->live_nodes_map); | |
6714d8e8 KH |
2738 | __dlm_hb_node_down(dlm, br->dead_node); |
2739 | } | |
2740 | spin_unlock(&dlm->spinlock); | |
2741 | ||
2742 | dlm_kick_recovery_thread(dlm); | |
d6dea6e9 KH |
2743 | |
2744 | mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", | |
2745 | dlm->name, br->node_idx, br->dead_node, | |
2746 | dlm->reco.dead_node, dlm->reco.new_master); | |
2747 | ||
6714d8e8 KH |
2748 | dlm_put(dlm); |
2749 | return 0; | |
2750 | } | |
2751 | ||
466d1a45 | 2752 | #define DLM_FINALIZE_STAGE2 0x01 |
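/* Finalize is sent in two passes: finalize1 makes every node switch the
 * recovered lockres ownership to the new master and set
 * DLM_RECO_STATE_FINALIZE, and only then does finalize2 let them clear
 * that flag and reset their recovery state.  If the recovery master dies
 * in between, __dlm_hb_node_down() sees the FINALIZE flag and knows the
 * remastering itself already completed. */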
6714d8e8 KH |
2753 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) |
2754 | { | |
2755 | int ret = 0; | |
2756 | struct dlm_finalize_reco fr; | |
2757 | struct dlm_node_iter iter; | |
2758 | int nodenum; | |
2759 | int status; | |
466d1a45 | 2760 | int stage = 1; |
6714d8e8 | 2761 | |
466d1a45 KH |
2762 | mlog(0, "finishing recovery for node %s:%u, " |
2763 | "stage %d\n", dlm->name, dlm->reco.dead_node, stage); | |
6714d8e8 KH |
2764 | |
2765 | spin_lock(&dlm->spinlock); | |
2766 | dlm_node_iter_init(dlm->domain_map, &iter); | |
2767 | spin_unlock(&dlm->spinlock); | |
2768 | ||
466d1a45 | 2769 | stage2: |
6714d8e8 KH |
2770 | memset(&fr, 0, sizeof(fr)); |
2771 | fr.node_idx = dlm->node_num; | |
2772 | fr.dead_node = dlm->reco.dead_node; | |
466d1a45 KH |
2773 | if (stage == 2) |
2774 | fr.flags |= DLM_FINALIZE_STAGE2; | |
6714d8e8 KH |
2775 | |
2776 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | |
2777 | if (nodenum == dlm->node_num) | |
2778 | continue; | |
2779 | ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, | |
2780 | &fr, sizeof(fr), nodenum, &status); | |
466d1a45 | 2781 | if (ret >= 0) |
6714d8e8 | 2782 | ret = status; |
466d1a45 | 2783 | if (ret < 0) { |
a5196ec5 WW |
2784 | mlog(ML_ERROR, "Error %d when sending message %u (key " |
2785 | "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, | |
2786 | dlm->key, nodenum); | |
6714d8e8 | 2787 | if (dlm_is_host_down(ret)) { |
2bd63216 SM |
2788 | /* this has no effect on this recovery |
2789 | * session, so set the status to zero to | |
6714d8e8 KH |
2790 | * finish out the last recovery */ |
2791 | mlog(ML_ERROR, "node %u went down after this " | |
2792 | "node finished recovery.\n", nodenum); | |
2793 | ret = 0; | |
c27069e6 | 2794 | continue; |
6714d8e8 | 2795 | } |
6714d8e8 KH |
2796 | break; |
2797 | } | |
2798 | } | |
466d1a45 KH |
2799 | if (stage == 1) { |
2800 | /* reset the node_iter back to the top and send finalize2 */ | |
2801 | iter.curnode = -1; | |
2802 | stage = 2; | |
2803 | goto stage2; | |
2804 | } | |
6714d8e8 KH |
2805 | |
2806 | return ret; | |
2807 | } | |
2808 | ||
d74c9803 KH |
2809 | int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, |
2810 | void **ret_data) | |
6714d8e8 KH |
2811 | { |
2812 | struct dlm_ctxt *dlm = data; | |
2813 | struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; | |
466d1a45 | 2814 | int stage = 1; |
6714d8e8 KH |
2815 | |
2816 | /* ok to return 0, domain has gone away */ | |
2817 | if (!dlm_grab(dlm)) | |
2818 | return 0; | |
2819 | ||
466d1a45 KH |
2820 | if (fr->flags & DLM_FINALIZE_STAGE2) |
2821 | stage = 2; | |
6714d8e8 | 2822 | |
466d1a45 KH |
2823 | mlog(0, "%s: node %u finalizing recovery stage%d of " |
2824 | "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, | |
2825 | fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); | |
2bd63216 | 2826 | |
6714d8e8 KH |
2827 | spin_lock(&dlm->spinlock); |
2828 | ||
2829 | if (dlm->reco.new_master != fr->node_idx) { | |
2830 | mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " | |
2831 | "%u is supposed to be the new master, dead=%u\n", | |
2832 | fr->node_idx, dlm->reco.new_master, fr->dead_node); | |
2833 | BUG(); | |
2834 | } | |
2835 | if (dlm->reco.dead_node != fr->dead_node) { | |
2836 | mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " | |
2837 | "node %u, but node %u is supposed to be dead\n", | |
2838 | fr->node_idx, fr->dead_node, dlm->reco.dead_node); | |
2839 | BUG(); | |
2840 | } | |
2841 | ||
466d1a45 KH |
2842 | switch (stage) { |
2843 | case 1: | |
2844 | dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); | |
2845 | if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { | |
2846 | mlog(ML_ERROR, "%s: received finalize1 from " | |
2847 | "new master %u for dead node %u, but " | |
2848 | "this node has already received it!\n", | |
2849 | dlm->name, fr->node_idx, fr->dead_node); | |
2850 | dlm_print_reco_node_status(dlm); | |
2851 | BUG(); | |
2852 | } | |
2853 | dlm->reco.state |= DLM_RECO_STATE_FINALIZE; | |
2854 | spin_unlock(&dlm->spinlock); | |
2855 | break; | |
2856 | case 2: | |
2857 | if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { | |
2858 | mlog(ML_ERROR, "%s: received finalize2 from " | |
2859 | "new master %u for dead node %u, but " | |
2860 | "this node did not have finalize1!\n", | |
2861 | dlm->name, fr->node_idx, fr->dead_node); | |
2862 | dlm_print_reco_node_status(dlm); | |
2863 | BUG(); | |
2864 | } | |
2865 | dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; | |
2866 | spin_unlock(&dlm->spinlock); | |
2867 | dlm_reset_recovery(dlm); | |
2868 | dlm_kick_recovery_thread(dlm); | |
2869 | break; | |
2870 | default: | |
2871 | BUG(); | |
2872 | } | |
6714d8e8 | 2873 | |
d6dea6e9 KH |
2874 | mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", |
2875 | dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); | |
2876 | ||
6714d8e8 KH |
2877 | dlm_put(dlm); |
2878 | return 0; | |
2879 | } |