/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_do_master_requery(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 u8 nodenum, u8 *real_master);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);

static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;

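/* Migration cookies tag multi-message lockres transfers so the receiver
 * can tell which packets belong together.  They are handed out from a
 * single global counter; on wraparound it skips back to 1 so that 0 stays
 * reserved for the common single-message case. */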
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}



/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */
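
/* On the recovery master, per-node progress is tracked in the
 * dlm_reco_node_data state field.  A live node normally moves through
 * INIT -> REQUESTING -> REQUESTED -> (RECEIVING) -> DONE as it is asked
 * for, and then sends back, the dead node's lock state; FINALIZE_SENT
 * and DEAD cover the cleanup and mid-recovery-death cases. */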


#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	/* a set bit means the node is still in the domain, so the
	 * test must be negated to match the semantics above */
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}

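/* One pass of the recovery state machine: pick a dead node out of the
 * recovery map, make sure a recovery master exists (possibly this node),
 * and either remaster the dead node's locks here or return to the main
 * thread loop while another node masters the recovery session. */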
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
		else
			dlm->reco.dead_node = bit;
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "recovery thread found node %u in the recovery map!\n",
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

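/* Run on the recovery master: ask every live node for the lock state it
 * holds for the dead node, wait until each node has reported DATA DONE,
 * then broadcast the finalize message and hand the remastered lock
 * resources back to the dlm thread.  Returns -EAGAIN if a participant
 * dies mid-recovery so the whole pass can be restarted. */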
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	status = dlm_init_recovery_area(dlm, dead_node);
	if (status < 0)
		goto leave;

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
		if (status < 0) {
			mlog_errno(status);
			if (dlm_is_host_down(status))
				ndata->state = DLM_RECO_NODE_DATA_DEAD;
			else {
				destroy = 1;
				goto leave;
			}
		}

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				// start all over
				destroy = 1;
				status = -EAGAIN;
				goto leave;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry (iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(ML_NOTICE, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					spin_unlock(&dlm_reco_state_lock);
					// start all over
					destroy = 1;
					status = -EAGAIN;
					/* instead of spinning like crazy here,
					 * wait for the domain map to catch up
					 * with the network state.  otherwise this
					 * can be hit hundreds of times before
					 * the node is really seen as dead. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
					goto leave;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = ret;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

leave:
	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	mlog_exit(status);
	return status;
}

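/* Snapshot the current domain map and allocate one tracking entry per
 * live node.  The dead node must already have been cleared from the
 * domain map by this point, hence the BUG_ON below. */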
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
		  "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;

}

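/* Handler for DLM_LOCK_REQUEST_MSG on a node that still holds lock state
 * for the dead node.  Dumping that state involves allocation and network
 * traffic, which cannot happen in net handler context, so the real work
 * is pushed onto the dlm work queue as dlm_request_all_locks_worker. */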
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

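/* Work-queue side of the lock request: runs on the node that received
 * the request.  Gathers every lockres that was mastered by the dead node
 * (or whose master is unknown), streams each one to the recovery master
 * a page at a time, and finishes with a DATA DONE message. */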
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* show extra debug info if the recovery state is messed */
		mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
		     "request(dead=%u, master=%u)\n",
		     dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
		     dead_node, reco_master);
		mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
		     "entry[0]={c=%"MLFu64",l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
		     dlm->name, mres->lockname_len, mres->lockname, mres->master,
		     mres->num_locks, mres->total_locks, mres->flags,
		     mres->ml[0].cookie, mres->ml[0].list, mres->ml[0].flags,
		     mres->ml[0].type, mres->ml[0].convert_type,
		     mres->ml[0].highest_blocked, mres->ml[0].node);
		BUG();
	}
	BUG_ON(dead_node != dlm->reco.dead_node);
	BUG_ON(reco_master != dlm->reco.new_master);

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */
	list_for_each(iter, &resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0)
			mlog_errno(ret);
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
	if (ret < 0)
		mlog_errno(ret);

	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	/* negative status is ignored by the caller */
	if (ret >= 0)
		ret = tmpret;
	return ret;
}


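/* Runs on the recovery master when a node reports that it has finished
 * sending lock state for the dead node; flips that node's ndata entry to
 * DONE and kicks the recovery thread so dlm_remaster_locks can re-check
 * whether every node has reported in. */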
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);
	BUG_ON(done->dead_node != dlm->reco.dead_node);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
				  "doing recovery for node %u. sending it.\n",
				  dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
				  "for node %u. sending it.\n", dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

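/* Count the locks on all three queues.  This (like
 * dlm_list_num_to_pointer below) relies on granted, converting and
 * blocked being adjacent list_heads in struct dlm_lock_resource, so the
 * queue pointer can simply be incremented to walk them in order. */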
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}

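/* Ship the locks accumulated in mres so far to send_to, tagging the
 * message with DLM_MRES_ALL_DONE if this flush covers the last lock,
 * then reset the buffer so the caller can keep packing locks into it. */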
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}


/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
			    memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}


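/* Pack every lock on a single lockres into one or more migratable
 * lockres messages.  Most resources fit in a page; when they do not, a
 * nonzero migration cookie ties the series of messages together for the
 * receiver. */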
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue, *iter;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
			  "require more than one network packet to "
			  "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);

			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0) {
				// TODO
				mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
				     "returned %d, TODO\n", ret);
				BUG();
			}
		}
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0) {
		// TODO
		mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
		     "TODO\n", ret);
		BUG();
	}
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
		  (mres->flags & DLM_MRES_RECOVERY) ?
		  "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
					  mres->lockname_len,
					  mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);

	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
			  "unknown owner.. will need to requery: "
			  "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	}

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	mlog_exit(ret);
	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare.  only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
				  ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
				   "this node will take it.\n",
				   res->lockname.len, res->lockname.name);
		} else {
			mlog(0, "master needs to respond to sender "
				  "that node %u still owns %.*s\n",
				  real_master, res->lockname.len,
				  res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
	mlog_exit(ret);
}



static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			BUG();
			/* TODO: need to figure a way to restart this */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}


static int dlm_do_master_requery(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
			  nodenum, *real_master);
		ret = 0;
	}
	return ret;
}


/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			list_for_each(iter, queue) {
				lock = list_entry (iter, struct dlm_lock, list);
				if (lock->ml.cookie != ml->cookie)
					lock = NULL;
				else
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				mlog(ML_ERROR, "could not find local lock "
					       "with cookie %"MLFu64"!\n",
				     ml->cookie);
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_del_init(&lock->list);
			list_add_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (mres->lvb[0]) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (res->lvb[0] && (ml->type == LKM_EXMODE ||
				    memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					mlog(ML_ERROR, "received bad lvb!\n");
					__dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}


		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       meaningless.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		spin_lock(&res->spinlock);
		dlm_lock_get(newlock);
		list_add_tail(&newlock->list, queue);
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	mlog_exit(ret);
	return ret;
}

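/* Pull a lockres into recovery: flag it RECOVERING, park it on the
 * dlm->reco.resources list, and resolve any operations that were caught
 * in flight when the master died.  Pending converts and cancels end up
 * back on the granted list, pending lock requests are dropped entirely,
 * and pending unlocks are treated as having completed, since the dead
 * master can never respond to them. */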
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering))
		list_del_init(&res->recovering);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}



/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct list_head *iter, *iter2;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;

	struct dlm_lock_resource *res;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);

	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = &(dlm->lockres_hash[i]);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				spin_lock(&res->spinlock);
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}

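/* Decide whether a lock means the lvb can no longer be trusted.  On the
 * master, a dead node that held EX may have written a newer lvb than the
 * one we have.  On a secondary, only a local EX or PR lock guarantees we
 * ever saw a valid copy. */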
1753 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
1754 {
1755 if (local) {
1756 if (lock->ml.type != LKM_EXMODE &&
1757 lock->ml.type != LKM_PRMODE)
1758 return 1;
1759 } else if (lock->ml.type == LKM_EXMODE)
1760 return 1;
1761 return 0;
1762 }
1763
1764 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
1765 struct dlm_lock_resource *res, u8 dead_node)
1766 {
1767 struct list_head *iter, *queue;
1768 struct dlm_lock *lock;
1769 int blank_lvb = 0, local = 0;
1770 int i;
1771 u8 search_node;
1772
1773 assert_spin_locked(&dlm->spinlock);
1774 assert_spin_locked(&res->spinlock);
1775
1776 if (res->owner == dlm->node_num) {
1777 /* if this node owned the lockres, and if the dead node
1778 * had an EX when it died, blank out the lvb */
1779 search_node = dead_node;
1780 } else {
1781 /* if this is a secondary lockres, and we had no EX or PR
1782 * locks granted, we can no longer trust the lvb */
1783 search_node = dlm->node_num;
1784 local = 1; /* check local state for valid lvb */
1785 }
1786
1787 for (i = DLM_GRANTED_LIST; i <= DLM_CONVERTING_LIST; i++) {
1788 queue = dlm_list_idx_to_ptr(res, i);
1789 list_for_each(iter, queue) {
1790 lock = list_entry(iter, struct dlm_lock, list);
1791 if (lock->ml.node == search_node) {
1792 if (dlm_lvb_needs_invalidation(lock, local)) {
1793 /* zero the lksb lvb and lockres lvb */
1794 blank_lvb = 1;
1795 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
1796 }
1797 }
1798 }
1799 }
1800
1801 if (blank_lvb) {
1802 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
1803 res->lockname.len, res->lockname.name, dead_node);
1804 memset(res->lvb, 0, DLM_LVB_LEN);
1805 }
1806 }
1807
1808 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
1809 struct dlm_lock_resource *res, u8 dead_node)
1810 {
1811 struct list_head *iter, *tmpiter;
1812 struct dlm_lock *lock;
1813
1814 /* this node is the lockres master:
1815 * 1) remove any stale locks for the dead node
1816 * 2) if the dead node had an EX when it died, blank out the lvb
1817 */
1818 assert_spin_locked(&dlm->spinlock);
1819 assert_spin_locked(&res->spinlock);
1820
1821 /* TODO: check pending_asts, pending_basts here */
1822 list_for_each_safe(iter, tmpiter, &res->granted) {
1823 lock = list_entry(iter, struct dlm_lock, list);
1824 if (lock->ml.node == dead_node) {
1825 list_del_init(&lock->list);
1826 dlm_lock_put(lock);
1827 }
1828 }
1829 list_for_each_safe(iter, tmpiter, &res->converting) {
1830 lock = list_entry(iter, struct dlm_lock, list);
1831 if (lock->ml.node == dead_node) {
1832 list_del_init(&lock->list);
1833 dlm_lock_put(lock);
1834 }
1835 }
1836 list_for_each_safe(iter, tmpiter, &res->blocked) {
1837 lock = list_entry(iter, struct dlm_lock, list);
1838 if (lock->ml.node == dead_node) {
1839 list_del_init(&lock->list);
1840 dlm_lock_put(lock);
1841 }
1842 }
1843
1844 /* do not kick thread yet */
1845 __dlm_dirty_lockres(dlm, res);
1846 }
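
/*
 * Editor's sketch -- not part of the original file: the three sweeps
 * in dlm_free_dead_locks() are identical apart from the queue they
 * walk, so they could be factored into one helper.  The helper name
 * below is hypothetical.
 */
static void dlm_free_dead_locks_on_queue(struct list_head *queue,
					 u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;

	list_for_each_safe(iter, tmpiter, queue) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			/* drop the lock from the queue and release
			 * the reference the queue held on it */
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
}
/* dlm_free_dead_locks() would then call this for &res->granted,
 * &res->converting and &res->blocked in turn. */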
1847
1848 /* if this node is the recovery master, and there are no
1849 * locks for a given lockres owned by this node that are in
1850 * either PR or EX mode, zero out the lvb before requesting
1851 * the remaining lock state, since it can no longer be
1852 * trusted. */
1853
1854
1855 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
1856 {
1857 struct hlist_node *iter;
1858 struct dlm_lock_resource *res;
1859 int i;
1860 struct hlist_head *bucket;
1861 struct dlm_lock *lock;
1862
1863
1864 /* purge any stale mles */
1865 dlm_clean_master_list(dlm, dead_node);
1866
1867 /*
1868 * now clean up all lock resources. there are two rules:
1869 *
1870 * 1) if the dead node was the master, move the lockres
1871 * to the recovering list. set the RECOVERING flag.
1872 * this lockres needs to be cleaned up before it can
1873 * be used further.
1874 *
1875 * 2) if this node was the master, remove all locks from
1876 * each of the lockres queues that were owned by the
1877 * dead node. once recovery finishes, the dlm thread
1878 * can be kicked again to see if any ASTs or BASTs
1879 * need to be fired as a result.
1880 */
1881 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
1882 bucket = &(dlm->lockres_hash[i]);
1883 hlist_for_each_entry(res, iter, bucket, hash_node) {
1884 /* always prune any $RECOVERY entries for dead nodes,
1885 * otherwise hangs can occur during later recovery */
1886 if (dlm_is_recovery_lock(res->lockname.name,
1887 res->lockname.len)) {
1888 spin_lock(&res->spinlock);
1889 list_for_each_entry(lock, &res->granted, list) {
1890 if (lock->ml.node == dead_node) {
1891 mlog(0, "AHA! there was "
1892 "a $RECOVERY lock for dead "
1893 "node %u (%s)!\n",
1894 dead_node, dlm->name);
1895 list_del_init(&lock->list);
1896 dlm_lock_put(lock);
1897 break;
1898 }
1899 }
1900 spin_unlock(&res->spinlock);
1901 continue;
1902 }
1903 spin_lock(&res->spinlock);
1904 /* zero the lvb if necessary */
1905 dlm_revalidate_lvb(dlm, res, dead_node);
1906 if (res->owner == dead_node)
1907 dlm_move_lockres_to_recovery_list(dlm, res);
1908 else if (res->owner == dlm->node_num) {
1909 dlm_free_dead_locks(dlm, res, dead_node);
1910 __dlm_lockres_calc_usage(dlm, res);
1911 }
1912 spin_unlock(&res->spinlock);
1913 }
1914 }
1915
1916 }
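
/*
 * Editor's note -- not part of the original file: per-lockres action
 * taken by the loop above, given who owned the resource ($RECOVERY
 * resources are special-cased earlier in the loop):
 *
 *	res->owner == dead_node      -> move to dlm->reco.resources,
 *	                                set DLM_LOCK_RES_RECOVERING
 *	res->owner == dlm->node_num  -> free the dead node's locks,
 *	                                recompute lockres usage
 *	otherwise                    -> only the lvb revalidation applies
 */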
1917
1918 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
1919 {
1920 assert_spin_locked(&dlm->spinlock);
1921
1922 /* check to see if the node is already considered dead */
1923 if (!test_bit(idx, dlm->live_nodes_map)) {
1924 mlog(0, "for domain %s, node %d is already dead. "
1925 "another node likely did recovery already.\n",
1926 dlm->name, idx);
1927 return;
1928 }
1929
1930 /* check to see whether we care about this node at all */
1931 if (!test_bit(idx, dlm->domain_map)) {
1932 /* This also catches the case that we get a node down
1933 * but haven't joined the domain yet. */
1934 mlog(0, "node %u already removed from domain!\n", idx);
1935 return;
1936 }
1937
1938 clear_bit(idx, dlm->live_nodes_map);
1939
1940 /* Clean up join state on node death. */
1941 if (dlm->joining_node == idx) {
1942 mlog(0, "Clearing join state for node %u\n", idx);
1943 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1944 }
1945
1946 /* make sure local cleanup occurs before the heartbeat events */
1947 if (!test_bit(idx, dlm->recovery_map))
1948 dlm_do_local_recovery_cleanup(dlm, idx);
1949
1950 /* notify anything attached to the heartbeat events */
1951 dlm_hb_event_notify_attached(dlm, idx, 0);
1952
1953 mlog(0, "node %u being removed from domain map!\n", idx);
1954 clear_bit(idx, dlm->domain_map);
1955 /* wake up migration waiters if a node goes down.
1956 * perhaps later we can generalize this for other waiters. */
1957 wake_up(&dlm->migration_wq);
1958
1959 if (test_bit(idx, dlm->recovery_map))
1960 mlog(0, "domain %s, node %u already added "
1961 "to recovery map!\n", dlm->name, idx);
1962 else
1963 set_bit(idx, dlm->recovery_map);
1964 }
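
/*
 * Editor's note -- illustrative summary, not part of the original
 * file: for a node death, __dlm_hb_node_down() moves the node through
 * the domain's bitmaps as follows, all under dlm->spinlock:
 *
 *	live_nodes_map:  1 -> 0   (node no longer heartbeating)
 *	domain_map:      1 -> 0   (node removed from the domain)
 *	recovery_map:    0 -> 1   (node now awaits recovery)
 */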
1965
1966 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
1967 {
1968 struct dlm_ctxt *dlm = data;
1969
1970 if (!dlm_grab(dlm))
1971 return;
1972
1973 spin_lock(&dlm->spinlock);
1974 __dlm_hb_node_down(dlm, idx);
1975 spin_unlock(&dlm->spinlock);
1976
1977 dlm_put(dlm);
1978 }
1979
1980 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
1981 {
1982 struct dlm_ctxt *dlm = data;
1983
1984 if (!dlm_grab(dlm))
1985 return;
1986
1987 spin_lock(&dlm->spinlock);
1988 set_bit(idx, dlm->live_nodes_map);
1989 /* do NOT notify mles attached to the heartbeat events.
1990 * new nodes are of no interest for mastery until they have joined. */
1991 spin_unlock(&dlm->spinlock);
1992
1993 dlm_put(dlm);
1994 }
1995
1996 static void dlm_reco_ast(void *astdata)
1997 {
1998 struct dlm_ctxt *dlm = astdata;
1999 mlog(0, "ast for recovery lock fired! this=%u, dlm=%s\n",
2000 dlm->node_num, dlm->name);
2001 }
2002 static void dlm_reco_bast(void *astdata, int blocked_type)
2003 {
2004 struct dlm_ctxt *dlm = astdata;
2005 mlog(0, "bast for recovery lock fired! this=%u, dlm=%s\n",
2006 dlm->node_num, dlm->name);
2007 }
2008 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2009 {
2010 mlog(0, "unlockast for recovery lock fired!\n");
2011 }
2012
2013 /*
2014 * dlm_pick_recovery_master will continually attempt to use
2015 * dlmlock() on the special "$RECOVERY" lockres with the
2016 * LKM_NOQUEUE flag to get an EX. every thread that enters
2017 * this function, on every node racing to become the recovery
2018 * master, keeps attempting this until either:
2019 * a) this node gets the EX (and becomes the recovery master),
2020 * or b) dlm->reco.new_master gets set to some nodenum
2021 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2022 * so each time a recovery master is needed, the entire cluster
2023 * will sync at this point. if the new master dies, that will
2024 * be detected in dlm_do_recovery */
2025 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2026 {
2027 enum dlm_status ret;
2028 struct dlm_lockstatus lksb;
2029 int status = -EINVAL;
2030
2031 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2032 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2033 again:
2034 memset(&lksb, 0, sizeof(lksb));
2035
2036 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2037 DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
2038
2039 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb.status=%d\n",
2040 dlm->name, ret, lksb.status);
2041
2042 if (ret == DLM_NORMAL) {
2043 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2044 dlm->name, dlm->node_num);
2045
2046 /* got the EX lock. check to see if another node
2047 * just became the reco master */
2048 if (dlm_reco_master_ready(dlm)) {
2049 mlog(0, "%s: got reco EX lock, but %u will "
2050 "do the recovery\n", dlm->name,
2051 dlm->reco.new_master);
2052 status = -EEXIST;
2053 } else {
2054 status = 0;
2055
2056 /* see if recovery was already finished elsewhere */
2057 spin_lock(&dlm->spinlock);
2058 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2059 status = -EINVAL;
2060 mlog(0, "%s: got reco EX lock, but "
2061 "node got recovered already\n", dlm->name);
2062 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2063 mlog(ML_ERROR, "%s: new master is %u "
2064 "but no dead node!\n",
2065 dlm->name, dlm->reco.new_master);
2066 BUG();
2067 }
2068 }
2069 spin_unlock(&dlm->spinlock);
2070 }
2071
2072 /* if this node has actually become the recovery master,
2073 * set the master and send the messages to begin recovery */
2074 if (!status) {
2075 mlog(0, "%s: dead=%u, this=%u, sending "
2076 "begin_reco now\n", dlm->name,
2077 dlm->reco.dead_node, dlm->node_num);
2078 status = dlm_send_begin_reco_message(dlm,
2079 dlm->reco.dead_node);
2080 /* this always succeeds */
2081 BUG_ON(status);
2082
2083 /* set the new_master to this node */
2084 spin_lock(&dlm->spinlock);
2085 dlm->reco.new_master = dlm->node_num;
2086 spin_unlock(&dlm->spinlock);
2087 }
2088
2089 /* recovery lock is a special case. ast will not get fired,
2090 * so just go ahead and unlock it. */
2091 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2092 if (ret == DLM_DENIED) {
2093 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2094 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2095 }
2096 if (ret != DLM_NORMAL) {
2097 /* this would be very bad. it could only happen
2098 * if there was a network error during the unlock
2099 * because of node death. in that case the unlock
2100 * is actually "done" and the lock structure has
2101 * even been freed. we can continue, but only
2102 * because this specific lock name is special. */
2103 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2104 }
2105 } else if (ret == DLM_NOTQUEUED) {
2106 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2107 dlm->name, dlm->node_num);
2108 /* another node is master. wait on
2109 * reco.new_master != O2NM_INVALID_NODE_NUM
2110 * for at most one second */
2111 wait_event_timeout(dlm->dlm_reco_thread_wq,
2112 dlm_reco_master_ready(dlm),
2113 msecs_to_jiffies(1000));
2114 if (!dlm_reco_master_ready(dlm)) {
2115 mlog(0, "%s: reco master taking a while\n",
2116 dlm->name);
2117 goto again;
2118 }
2119 /* another node has informed this one that it is reco master */
2120 mlog(0, "%s: reco master %u is ready to recover %u\n",
2121 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2122 status = -EEXIST;
2123 } else {
2124 struct dlm_lock_resource *res;
2125
2126 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2127 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2128 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2129 dlm_errname(lksb.status));
2130 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2131 DLM_RECOVERY_LOCK_NAME_LEN);
2132 if (res) {
2133 dlm_print_one_lock_resource(res);
2134 dlm_lockres_put(res);
2135 } else {
2136 mlog(ML_ERROR, "recovery lock not found\n");
2137 }
2138 BUG();
2139 }
2140
2141 return status;
2142 }
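
/*
 * Editor's note -- not part of the original file: callers of
 * dlm_pick_recovery_master() see one of three outcomes:
 *
 *	0        this node took the $RECOVERY EX and is the new
 *	         recovery master; begin_reco has been sent
 *	-EEXIST  another node became (or announced itself as) the
 *	         recovery master first
 *	-EINVAL  the EX was granted, but the dead node had already
 *	         been recovered elsewhere
 */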
2143
2144 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2145 {
2146 struct dlm_begin_reco br;
2147 int ret = 0;
2148 struct dlm_node_iter iter;
2149 int nodenum;
2150 int status;
2151
2152 mlog_entry("%u\n", dead_node);
2153
2154 mlog(0, "dead node is %u\n", dead_node);
2155
2156 spin_lock(&dlm->spinlock);
2157 dlm_node_iter_init(dlm->domain_map, &iter);
2158 spin_unlock(&dlm->spinlock);
2159
2160 clear_bit(dead_node, iter.node_map);
2161
2162 memset(&br, 0, sizeof(br));
2163 br.node_idx = dlm->node_num;
2164 br.dead_node = dead_node;
2165
2166 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2167 ret = 0;
2168 if (nodenum == dead_node) {
2169 mlog(0, "not sending begin reco to dead node "
2170 "%u\n", dead_node);
2171 continue;
2172 }
2173 if (nodenum == dlm->node_num) {
2174 mlog(0, "not sending begin reco to self\n");
2175 continue;
2176 }
2177 retry:
2178 ret = -EINVAL;
2179 mlog(0, "attempting to send begin reco msg to %d\n",
2180 nodenum);
2181 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2182 &br, sizeof(br), nodenum, &status);
2183 /* a negative remote status is handled just below */
2184 if (ret >= 0)
2185 ret = status;
2186 if (dlm_is_host_down(ret)) {
2187 /* node is down. not involved in recovery
2188 * so just keep going */
2189 mlog(0, "%s: node %u was down when sending "
2190 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2191 ret = 0;
2192 }
2193 if (ret < 0) {
2194 struct dlm_lock_resource *res;
2195 /* this is now a serious problem, possibly ENOMEM
2196 * in the network stack. must retry */
2197 mlog_errno(ret);
2198 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2199 "returned %d\n", dlm->name, nodenum, ret);
2200 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2201 DLM_RECOVERY_LOCK_NAME_LEN);
2202 if (res) {
2203 dlm_print_one_lock_resource(res);
2204 dlm_lockres_put(res);
2205 } else {
2206 mlog(ML_ERROR, "recovery lock not found\n");
2207 }
2208 /* sleep for a bit in hopes that we can avoid
2209 * another ENOMEM */
2210 msleep(100);
2211 goto retry;
2212 }
2213 }
2214
2215 return ret;
2216 }
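
/*
 * Editor's sketch -- not part of the original file: the send pattern
 * used above (and again in dlm_send_finalize_reco_message() below),
 * distilled.  o2net_send_message() returns a negative value on a
 * local transport error and fills *status with the remote handler's
 * return value; both are folded into one result.  The helper name is
 * hypothetical.
 */
static int dlm_send_one_small_msg(struct dlm_ctxt *dlm, u32 msg_type,
				  void *buf, u32 len, u8 nodenum)
{
	int status = 0;
	int ret = o2net_send_message(msg_type, dlm->key, buf, len,
				     nodenum, &status);
	return (ret < 0) ? ret : status;
}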
2217
2218 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2219 {
2220 struct dlm_ctxt *dlm = data;
2221 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2222
2223 /* ok to return 0, domain has gone away */
2224 if (!dlm_grab(dlm))
2225 return 0;
2226
2227 mlog(0, "node %u wants to recover node %u\n",
2228 br->node_idx, br->dead_node);
2229
2230 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2231
2232 spin_lock(&dlm->spinlock);
2233 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2234 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2235 mlog(0, "%s: new_master %u died, changing "
2236 "to %u\n", dlm->name, dlm->reco.new_master,
2237 br->node_idx);
2238 } else {
2239 mlog(0, "%s: new_master %u NOT DEAD, changing "
2240 "to %u\n", dlm->name, dlm->reco.new_master,
2241 br->node_idx);
2242 /* may not have seen the new master as dead yet */
2243 }
2244 }
2245 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2246 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2247 "node %u changing it to %u\n", dlm->name,
2248 dlm->reco.dead_node, br->node_idx, br->dead_node);
2249 }
2250 dlm->reco.new_master = br->node_idx;
2251 dlm->reco.dead_node = br->dead_node;
2252 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2253 mlog(0, "recovery master %u sees %u as dead, but this "
2254 "node has not seen it yet. marking %u as dead\n",
2255 br->node_idx, br->dead_node, br->dead_node);
2256 if (!test_bit(br->dead_node, dlm->domain_map) ||
2257 !test_bit(br->dead_node, dlm->live_nodes_map))
2258 mlog(0, "%u not in domain/live_nodes map "
2259 "so setting it in reco map manually\n",
2260 br->dead_node);
2261 set_bit(br->dead_node, dlm->recovery_map);
2262 __dlm_hb_node_down(dlm, br->dead_node);
2263 }
2264 spin_unlock(&dlm->spinlock);
2265
2266 dlm_kick_recovery_thread(dlm);
2267 dlm_put(dlm);
2268 return 0;
2269 }
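
/*
 * Editor's note -- not part of the original file: in the handler
 * above, the recovery master's view wins.  new_master and dead_node
 * are taken from the message, and if this node had not yet noticed
 * the death, __dlm_hb_node_down() is invoked locally so both nodes
 * agree on the dead node before the recovery thread is kicked.
 */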
2270
2271 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2272 {
2273 int ret = 0;
2274 struct dlm_finalize_reco fr;
2275 struct dlm_node_iter iter;
2276 int nodenum;
2277 int status;
2278
2279 mlog(0, "finishing recovery for node %s:%u\n",
2280 dlm->name, dlm->reco.dead_node);
2281
2282 spin_lock(&dlm->spinlock);
2283 dlm_node_iter_init(dlm->domain_map, &iter);
2284 spin_unlock(&dlm->spinlock);
2285
2286 memset(&fr, 0, sizeof(fr));
2287 fr.node_idx = dlm->node_num;
2288 fr.dead_node = dlm->reco.dead_node;
2289
2290 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2291 if (nodenum == dlm->node_num)
2292 continue;
2293 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2294 &fr, sizeof(fr), nodenum, &status);
2295 if (ret >= 0) {
2296 ret = status;
2297 if (dlm_is_host_down(ret)) {
2298 /* this has no effect on this recovery
2299 * session, so set the status to zero to
2300 * finish out the last recovery */
2301 mlog(ML_ERROR, "node %u went down after this "
2302 "node finished recovery.\n", nodenum);
2303 ret = 0;
2304 }
2305 }
2306 if (ret < 0) {
2307 mlog_errno(ret);
2308 break;
2309 }
2310 }
2311
2312 return ret;
2313 }
2314
2315 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2316 {
2317 struct dlm_ctxt *dlm = data;
2318 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2319
2320 /* ok to return 0, domain has gone away */
2321 if (!dlm_grab(dlm))
2322 return 0;
2323
2324 mlog(0, "node %u finalizing recovery of node %u\n",
2325 fr->node_idx, fr->dead_node);
2326
2327 spin_lock(&dlm->spinlock);
2328
2329 if (dlm->reco.new_master != fr->node_idx) {
2330 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2331 "%u is supposed to be the new master, dead=%u\n",
2332 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2333 BUG();
2334 }
2335 if (dlm->reco.dead_node != fr->dead_node) {
2336 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2337 "node %u, but node %u is supposed to be dead\n",
2338 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2339 BUG();
2340 }
2341
2342 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2343
2344 spin_unlock(&dlm->spinlock);
2345
2346 dlm_reset_recovery(dlm);
2347
2348 dlm_kick_recovery_thread(dlm);
2349 dlm_put(dlm);
2350 return 0;
2351 }