/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_do_master_requery(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 u8 nodenum, u8 *real_master);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);

static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;

static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;

	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
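
/* A self-contained userspace sketch (not kernel code) of the same
 * wrap-to-1 cookie scheme, handy for convincing yourself the generator
 * never hands out 0 -- which is why a zero mig_cookie can be used to
 * mean "single message, no cookie needed" in dlm_send_one_lockres
 * below: */
#if 0
#include <stdio.h>

static unsigned long long mig_cookie = 1;

static unsigned long long next_mig_cookie(void)
{
	unsigned long long c = mig_cookie;

	if (mig_cookie == ~0ULL)
		mig_cookie = 1;	/* wrap back to 1, skipping 0 */
	else
		mig_cookie++;
	return c;
}

int main(void)
{
	mig_cookie = ~0ULL;	/* force the wrap case */
	printf("%llu\n", next_mig_cookie());	/* hands out ~0ULL... */
	printf("%llu\n", next_mig_cookie());	/* ...then restarts at 1 */
	return 0;
}
#endif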

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}

/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 */

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
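
/* the recovery master tracks each live node's progress through steps
 * 5)-8) above with a small per-node state machine (the
 * DLM_RECO_NODE_DATA_* states used throughout this file).  a
 * hypothetical debugging helper, sketching the full state set; it is
 * not used anywhere in this file: */
#if 0
static const char *dlm_reco_node_data_state_str(int state)
{
	switch (state) {
	case DLM_RECO_NODE_DATA_INIT:		return "init";
	case DLM_RECO_NODE_DATA_REQUESTING:	return "requesting";
	case DLM_RECO_NODE_DATA_REQUESTED:	return "requested";
	case DLM_RECO_NODE_DATA_RECEIVING:	return "receiving";
	case DLM_RECO_NODE_DATA_DONE:		return "done";
	case DLM_RECO_NODE_DATA_FINALIZE_SENT:	return "finalize sent";
	case DLM_RECO_NODE_DATA_DEAD:		return "dead";
	default:				return "unknown";
	}
}
#endif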

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;

	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;

	spin_lock(&dlm->spinlock);
	dead = test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return !dead;
}

int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;

	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
		else
			dlm->reco.dead_node = bit;
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "recovery thread found node %u in the recovery map!\n",
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	status = dlm_init_recovery_area(dlm, dead_node);
	if (status < 0)
		goto leave;

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
		if (status < 0) {
			mlog_errno(status);
			if (dlm_is_host_down(status))
				ndata->state = DLM_RECO_NODE_DATA_DEAD;
			else {
				destroy = 1;
				goto leave;
			}
		}

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				// start all over
				destroy = 1;
				status = -EAGAIN;
				goto leave;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry(iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(ML_NOTICE, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					spin_unlock(&dlm_reco_state_lock);
					// start all over
					destroy = 1;
					status = -EAGAIN;
					/* instead of spinning like crazy here,
					 * wait for the domain map to catch up
					 * with the network state.  otherwise this
					 * can be hit hundreds of times before
					 * the node is really seen as dead. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
					goto leave;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = ret;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
	}

leave:
	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;

		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);

		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;
}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* show extra debug info if the recovery state is messed */
		mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
		     "request(dead=%u, master=%u)\n",
		     dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
		     dead_node, reco_master);
		mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
		     "entry[0]={c=%"MLFu64",l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
		     dlm->name, mres->lockname_len, mres->lockname, mres->master,
		     mres->num_locks, mres->total_locks, mres->flags,
		     mres->ml[0].cookie, mres->ml[0].list, mres->ml[0].flags,
		     mres->ml[0].type, mres->ml[0].convert_type,
		     mres->ml[0].highest_blocked, mres->ml[0].node);
	}
	BUG_ON(dead_node != dlm->reco.dead_node);
	BUG_ON(reco_master != dlm->reco.new_master);

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */
	list_for_each(iter, &resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0)
			mlog_errno(ret);
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
	if (ret < 0)
		mlog_errno(ret);

	free_page((unsigned long)data);
}

static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	/* negative status is ignored by the caller */
	if (ret >= 0)
		ret = tmpret;
	return ret;
}

int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);
	BUG_ON(done->dead_node != dlm->reco.dead_node);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	struct list_head *iter, *queue = &res->granted;
	int i, total_locks = 0;

	/* the granted, converting and blocked queues are adjacent
	 * in struct dlm_lock_resource, so bumping the queue pointer
	 * walks all three in order */
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}

static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
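
/* dlm_migratable_lockres is laid out so that a full complement of
 * DLM_MAX_MIGRATABLE_LOCKS ml[] entries still fits in the one page
 * zeroed above.  a hypothetical compile-time check of that assumption
 * (not in this file): */
#if 0
BUILD_BUG_ON(sizeof(struct dlm_migratable_lockres) +
	     DLM_MAX_MIGRATABLE_LOCKS * sizeof(struct dlm_migratable_lock) >
	     PAGE_SIZE);
#endif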

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
			    memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}
*dlm
, struct dlm_lock_resource
*res
,
1049 struct dlm_migratable_lockres
*mres
,
1050 u8 send_to
, u8 flags
)
1052 struct list_head
*queue
, *iter
;
1055 struct dlm_lock
*lock
;
1058 BUG_ON(!(flags
& (DLM_MRES_RECOVERY
|DLM_MRES_MIGRATION
)));
1060 mlog(0, "sending to %u\n", send_to
);
1062 total_locks
= dlm_num_locks_in_lockres(res
);
1063 if (total_locks
> DLM_MAX_MIGRATABLE_LOCKS
) {
1064 /* rare, but possible */
1065 mlog(0, "argh. lockres has %d locks. this will "
1066 "require more than one network packet to "
1067 "migrate\n", total_locks
);
1068 mig_cookie
= dlm_get_next_mig_cookie();
1071 dlm_init_migratable_lockres(mres
, res
->lockname
.name
,
1072 res
->lockname
.len
, total_locks
,
1073 mig_cookie
, flags
, res
->owner
);
1076 for (i
=DLM_GRANTED_LIST
; i
<=DLM_BLOCKED_LIST
; i
++) {
1077 queue
= dlm_list_idx_to_ptr(res
, i
);
1078 list_for_each(iter
, queue
) {
1079 lock
= list_entry (iter
, struct dlm_lock
, list
);
1081 /* add another lock. */
1083 if (!dlm_add_lock_to_array(lock
, mres
, i
))
1086 /* this filled the lock message,
1087 * we must send it immediately. */
1088 ret
= dlm_send_mig_lockres_msg(dlm
, mres
, send_to
,
1092 mlog(ML_ERROR
, "dlm_send_mig_lockres_msg "
1093 "returned %d, TODO\n", ret
);
1098 /* flush any remaining locks */
1099 ret
= dlm_send_mig_lockres_msg(dlm
, mres
, send_to
, res
, total_locks
);
1102 mlog(ML_ERROR
, "dlm_send_mig_lockres_msg returned %d, "

/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 *
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	}

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	return ret;
}

static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	/* note: data is the copied message buffer, not the ctxt */
	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	    (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
}

static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			BUG();
			/* TODO: need to figure a way to restart this */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}

static int dlm_do_master_requery(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}

/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;

	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	/* granted, converting and blocked are adjacent in the
	 * lockres, so index off of the granted queue */
	ret = &(res->granted);
	ret += list_num;
	return ret;
}

/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.  So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set.  In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well.  For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first.  This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master.  Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/
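
/* a minimal sketch of the caller-side "spin and recheck" behaviour
 * described above, written as a hypothetical helper (the real spinning
 * lives in the convert/cancel/unlock paths, not in this file): */
#if 0
static void dlm_wait_for_lockres_stable(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	while (res->state & (DLM_LOCK_RES_MIGRATING |
			     DLM_LOCK_RES_RECOVERING)) {
		spin_unlock(&res->spinlock);
		/* give the migration/recovery a chance to finish,
		 * then recheck the master before resending */
		schedule();
		spin_lock(&res->spinlock);
	}
	spin_unlock(&res->spinlock);
}
#endif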

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			list_for_each(iter, queue) {
				lock = list_entry(iter, struct dlm_lock, list);
				if (lock->ml.cookie != ml->cookie)
					lock = NULL;
				else
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				mlog(ML_ERROR, "could not find local lock "
				     "with cookie %"MLFu64"!\n",
				     ml->cookie);
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_del_init(&lock->list);
			list_add_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (mres->lvb[0]) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (res->lvb[0] && (ml->type == LKM_EXMODE ||
				    memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					mlog(ML_ERROR, "received bad lvb!\n");
					__dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       preserved exactly.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		spin_lock(&res->spinlock);
		dlm_lock_get(newlock);
		list_add_tail(&newlock->list, queue);
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}

void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering))
		list_del_init(&res->recovering);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry(iter, struct dlm_lock, list);

			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
		}
	}
}

/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct list_head *iter, *iter2, *bucket;
	struct dlm_lock_resource *res;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i=0; i<DLM_HASH_SIZE; i++) {
		bucket = &(dlm->resources[i]);
		list_for_each(iter, bucket) {
			res = list_entry(iter, struct dlm_lock_resource, list);
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				spin_lock(&res->spinlock);
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}

static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		/* locally granted locks below PR never saw a
		 * valid lvb, so it cannot be trusted */
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_safe(iter, tmpiter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->converting) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->blocked) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter;
	struct dlm_lock_resource *res;
	int i;
	struct list_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used again.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i=0; i<DLM_HASH_SIZE; i++) {
		bucket = &(dlm->resources[i]);
		list_for_each(iter, bucket) {
			res = list_entry(iter, struct dlm_lock_resource, list);
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node)
				dlm_move_lockres_to_recovery_list(dlm, res);
			else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}

static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery.
 * (an illustrative caller sketch appears after this function.) */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
							     dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm->reco.new_master = dlm->node_num;
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL,
					dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death. this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else
			mlog(ML_ERROR, "recovery lock not found\n");
		BUG();
	}

	return status;
}
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "dead node is %u\n", dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else
				mlog(ML_ERROR, "recovery lock not found\n");
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
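/*
 * Network handler for DLM_BEGIN_RECO_MSG.  Runs on every live node when
 * the newly elected recovery master broadcasts the (new_master,
 * dead_node) pair; the payload is the struct dlm_begin_reco filled in
 * by the sender above.
 */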
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "node %u wants to recover node %u\n",
	     br->node_idx, br->dead_node);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm->reco.new_master = br->node_idx;
	dlm->reco.dead_node = br->dead_node;
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		set_bit(br->dead_node, dlm->recovery_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);
	dlm_put(dlm);
	return 0;
}
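/*
 * Recovery completes in two broadcasts: begin_reco (above) announces
 * the election outcome, and finalize_reco (below) tells every node
 * that the dead node's locks have been remastered, so local recovery
 * state can be reset.
 */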
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "finishing recovery for node %s:%u\n",
	     dlm->name, dlm->reco.dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0) {
			ret = status;
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
			}
		}
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
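/*
 * Network handler for DLM_FINALIZE_RECO_MSG.  Sanity-checks that the
 * sender really is the agreed-upon recovery master and that the dead
 * node matches, then drops local recovery state for that node.
 */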
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "node %u finalizing recovery of node %u\n",
	     fr->node_idx, fr->dead_node);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);

	spin_unlock(&dlm->spinlock);

	dlm_reset_recovery(dlm);

	dlm_kick_recovery_thread(dlm);
	dlm_put(dlm);
	return 0;
}