/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdebug.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
enum dlm_mle_type {
	DLM_MLE_BLOCK,
	DLM_MLE_MASTER,
	DLM_MLE_MIGRATION
};

struct dlm_lock_name
{
	u8 len;
	u8 name[DLM_LOCKID_NAME_MAX];
};

struct dlm_master_list_entry
{
	struct list_head list;
	struct list_head hb_events;
	struct dlm_ctxt *dlm;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	union {
		struct dlm_lock_resource *res;
		struct dlm_lock_name name;
	} u;
};
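/* A quick note on the four node bitmaps above (an illustrative summary,
 * inferred from how the code below uses them, not normative):
 *
 *   maybe_map    - nodes that might own this lock (MAYBE responses)
 *   vote_map     - nodes being polled in the current mastery round
 *   response_map - nodes that have answered a master request so far
 *   node_map     - current view of live nodes, kept fresh by heartbeat
 *
 * Mastery is done when vote_map == response_map and this node holds the
 * lowest bit set in maybe_map (see dlm_wait_for_lock_mastery below). */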
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
				unsigned int namelen, void *nodemap,
				u32 flags);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	struct dlm_lock_resource *res;

	if (dlm != mle->dlm)
		return 0;

	if (mle->type == DLM_MLE_BLOCK ||
	    mle->type == DLM_MLE_MIGRATION) {
		if (namelen != mle->u.name.len ||
		    memcmp(name, mle->u.name.name, namelen)!=0)
			return 0;
	} else {
		res = mle->u.res;
		if (namelen != res->lockname.len ||
		    memcmp(res->lockname.name, name, namelen) != 0)
			return 0;
	}
	return 1;
}
/* Code here is included but defined out as it aids debugging */

#define dlm_print_nodemap(m)  _dlm_print_nodemap(m,#m)
void _dlm_print_nodemap(unsigned long *map, const char *mapname)
{
	int i;
	printk("%s=[ ", mapname);
	for (i=0; i<O2NM_MAX_NODES; i++)
		if (test_bit(i, map))
			printk("%d ", i);
	printk("]");
}
void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
	int refs;
	char *type;
	char attached;
	u8 master;
	unsigned int namelen;
	const char *name;
	struct kref *k;
	unsigned long *maybe = mle->maybe_map,
		      *vote = mle->vote_map,
		      *resp = mle->response_map,
		      *node = mle->node_map;

	k = &mle->mle_refs;
	if (mle->type == DLM_MLE_BLOCK)
		type = "BLK";
	else if (mle->type == DLM_MLE_MASTER)
		type = "MAS";
	else
		type = "MIG";
	refs = atomic_read(&k->refcount);
	master = mle->master;
	attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');

	if (mle->type != DLM_MLE_MASTER) {
		namelen = mle->u.name.len;
		name = mle->u.name.name;
	} else {
		namelen = mle->u.res->lockname.len;
		name = mle->u.res->lockname.name;
	}

	mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
		  namelen, name, type, refs, master, mle->new_master, attached,
		  mle->inuse);
	dlm_print_nodemap(maybe);
	printk(", ");
	dlm_print_nodemap(vote);
	printk(", ");
	dlm_print_nodemap(resp);
	printk(", ");
	dlm_print_nodemap(node);
	printk("\n");
}
static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
	struct dlm_master_list_entry *mle;
	struct list_head *iter;

	mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
	spin_lock(&dlm->master_lock);
	list_for_each(iter, &dlm->master_list) {
		mle = list_entry(iter, struct dlm_master_list_entry, list);
		dlm_print_one_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}
int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
	struct list_head *iter;
	struct dlm_ctxt *dlm;

	spin_lock(&dlm_domain_lock);
	list_for_each(iter, &dlm_domains) {
		dlm = list_entry (iter, struct dlm_ctxt, list);
		mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
		dlm_dump_mles(dlm);
	}
	spin_unlock(&dlm_domain_lock);
	return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
static kmem_cache_t *dlm_mle_cache = NULL;
static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EBADF:
		case -ECONNREFUSED:
		case -ENOTCONN:
		case -ECONNRESET:
		case -EPIPE:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ETIMEDOUT:
		case -ECONNABORTED:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -ENETRESET:
		case -ESHUTDOWN:
		case -ENOPROTOOPT:
		case -EINVAL:	/* if returned from our tcp code,
				   this means there is no socket */
			return 1;
	}
	return 0;
}
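/* Typical caller pattern (an illustrative sketch only): treat any errno
 * for which dlm_is_host_down() returns 1 as node death and keep going,
 * e.g.
 *
 *	ret = o2net_send_message(...);
 *	if (ret < 0) {
 *		if (!dlm_is_host_down(ret))
 *			BUG();	// not a network error: bail loudly
 *		// node died: finish out the remaining nodes
 *	}
 *
 * dlm_do_master_request() and dlm_do_assert_master() below both follow
 * this pattern. */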
/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
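/* The helpers below implement that lifecycle: attach is called once from
 * dlm_init_mle() under dlm->spinlock; detach happens as soon as a master
 * is known (see dlm_get_lock_resource and the assert handler) and again
 * defensively from dlm_mle_release().  Detaching twice is safe because
 * __dlm_mle_detach_hb_events() checks list_empty() first. */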
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	BUG_ON(!atomic_read(&mle->mle_refs.refcount));

	kref_put(&mle->mle_refs, dlm_mle_release);
}
/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_LIST_HEAD(&mle->list);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->u.res = res;
	} else if (mle->type == DLM_MLE_BLOCK) {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	} else /* DLM_MLE_MIGRATION */ {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	}

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct list_head *iter;

	assert_spin_locked(&dlm->master_lock);

	list_for_each(iter, &dlm->master_list) {
		tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;
	struct list_head *iter;

	assert_spin_locked(&dlm->spinlock);

	list_for_each(iter, &dlm->mle_hb_events) {
		mle = list_entry(iter, struct dlm_master_list_entry,
				 hb_events);
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL, NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}
static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	if (mle->type != DLM_MLE_MASTER) {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.name.len, mle->u.name.name, mle->type);
	} else {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.res->lockname.len,
		     mle->u.res->lockname.name, mle->type);
	}
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* remove from list if not already */
	if (!list_empty(&mle->list))
		list_del_init(&mle->list);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 owner)
{
	assert_spin_locked(&res->spinlock);

	mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

	if (owner == dlm->node_num)
		atomic_inc(&dlm->local_resources);
	else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_inc(&dlm->unknown_resources);
	else
		atomic_inc(&dlm->remote_resources);

	res->owner = owner;
}
void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res, u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner == res->owner)
		return;

	if (res->owner == dlm->node_num)
		atomic_dec(&dlm->local_resources);
	else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_dec(&dlm->unknown_resources);
	else
		atomic_dec(&dlm->remote_resources);

	dlm_set_lockres_owner(dlm, res, owner);
}
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;

	res = container_of(kref, struct dlm_lock_resource, refs);

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kfree(res->lockname.name);

	kfree(res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;

	kref_init(&res->refs);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	memset(res->lvb, 0, DLM_LVB_LEN);
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
				   const char *name,
				   unsigned int namelen)
{
	struct dlm_lock_resource *res;

	res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
	if (!res)
		return NULL;

	res->lockname.name = kmalloc(namelen, GFP_KERNEL);
	if (!res->lockname.name) {
		kfree(res);
		return NULL;
	}

	dlm_init_lockres(dlm, res, name, namelen);
	return res;
}
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
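/* Rough control flow of dlm_get_lock_resource() (a reader's sketch, not
 * normative): hash lookup -> if missing, allocate lockres + mle ->
 * either piggyback on an existing BLOCK/MIGRATION mle or insert our own
 * MASTER mle -> send master requests to every node in vote_map ->
 * dlm_wait_for_lock_mastery() until an owner is known -> clear
 * IN_PROGRESS and wake waiters. */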
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						const char *lockid,
						int flags)
{
	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int namelen, hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;

	BUG_ON(!lockid);

	namelen = strlen(lockid);
	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
	if (tmpres) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = (struct dlm_master_list_entry *)
			kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		} else if (mle->type == DLM_MLE_MIGRATION) {
			/* migration is in progress! */
			/* the good news is that we now know the
			 * "current" master (mle->master). */

			spin_unlock(&dlm->master_lock);
			assert_spin_locked(&dlm->spinlock);

			/* set the lockres owner and hash it */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res, mle->master);
			__dlm_insert_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			goto wake_waiters;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to"
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable be a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				    "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to"
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

redo_request:
	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			/* dlm_print_one_mle(mle); */
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
#define DLM_MASTERY_TIMEOUT_MS   5000
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		ret = dlm_do_master_request(mle, res->owner);
		if (ret < 0) {
			/* give recovery a chance to run */
			mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
			msleep(500);
			goto recheck;
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			     sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			     atomic_read(&mle->mle_refs.refcount),
			     res->lockname.len, res->lockname.name);
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "waiting again\n");
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res->lockname.name,
					   res->lockname.len, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}
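/* Note that (p1 & ~p2) | (p2 & ~p1) is just p1 ^ p2: diff_bm ends up
 * holding the symmetric difference of the two maps.  For example, with
 * orig=0b0110 and cur=0b0011, diff_bm=0b0101; bit 0 is then reported as
 * NODE_UP (absent in orig) and bit 2 as NODE_DOWN (present in orig). */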
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);

			/* if the node wasn't involved in mastery skip it,
			 * but clear it out from the maps so that it will
			 * not affect mastery of this lockres */
			clear_bit(node, mle->response_map);
			clear_bit(node, mle->vote_map);
			if (!test_bit(node, mle->maybe_map))
				goto next;

			/* if we're already blocked on lock mastery, and the
			 * dead node wasn't the expected master, or there is
			 * another node in the maybe_map, keep waiting */
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
						       O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node != lowest)
					goto next;

				mlog(ML_ERROR, "expected master %u died while "
				     "this node was blocked waiting on it!\n",
				     node);
				lowest = find_next_bit(mle->maybe_map,
						       O2NM_MAX_NODES,
						       lowest+1);
				if (lowest < O2NM_MAX_NODES) {
					mlog(0, "still blocked. waiting "
					     "on %u now\n", lowest);
					goto next;
				}

				/* mle is an MLE_BLOCK, but there is now
				 * nothing left to block on.  we need to return
				 * all the way back out and try again with
				 * an MLE_MASTER. dlm_do_local_recovery_cleanup
				 * has already run, so the mle refcount is ok */
				mlog(0, "no longer blocking. we can "
				     "try to master this here\n");
				mle->type = DLM_MLE_MASTER;
				memset(mle->maybe_map, 0,
				       sizeof(mle->maybe_map));
				memset(mle->response_map, 0,
				       sizeof(mle->maybe_map));
				memcpy(mle->vote_map, mle->node_map,
				       sizeof(mle->node_map));
				mle->u.res = res;
				set_bit(dlm->node_num, mle->maybe_map);

				ret = -EAGAIN;
				goto next;
			}

			clear_bit(node, mle->maybe_map);
			if (node > dlm->node_num)
				goto next;

			mlog(0, "dead node in map!\n");
			/* yuck. go back and re-contact all nodes
			 * in the vote_map, removing this node. */
			memset(mle->response_map, 0,
			       sizeof(mle->response_map));
		}
		ret = -EAGAIN;
next:
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */
static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response=0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	if (mle->type != DLM_MLE_MASTER) {
		request.namelen = mle->u.name.len;
		memcpy(request.name, mle->u.name.name, request.namelen);
	} else {
		request.namelen = mle->u.res->lockname.len;
		memcpy(request.name, mle->u.res->lockname.name,
			request.namelen);
	}

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			response = 0;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			spin_unlock(&res->spinlock);
			// mlog(0, "this node is the master\n");
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = (struct dlm_master_list_entry *)
				kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:

	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
			     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
		}
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
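/* An illustrative (hypothetical, not in-tree) debugging loop along the
 * lines suggested above might periodically re-assert everything this
 * node masters:
 *
 *	// for each lockres res known to be owned by dlm->node_num:
 *	//	dlm_do_assert_master(dlm, res->lockname.name,
 *	//			     res->lockname.len, nodemap, 0);
 *
 * where nodemap is a copy of dlm->domain_map with our own bit cleared,
 * exactly as dlm_assert_master_worker() builds it below. */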
static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
				unsigned int namelen, void *nodemap,
				u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error!\n");
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(ML_ERROR, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR,"during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			dlm_dump_lock_resources(dlm);
			BUG();
		} else if (r == EAGAIN) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
	}

	if (reassert)
		goto again;

	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(ML_ERROR, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING)  {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != assert->node_idx) {
				mlog(ML_ERROR, "assert_master from "
					  "%u, but current owner is "
					  "%u! (%.*s)\n",
				       assert->node_idx, res->owner,
				       namelen, name);
				goto kill;
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	//	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
						    nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx)
					master_request = 1;
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else {
				dlm_change_lockres_owner(dlm, res, mle->master);
			}
			spin_unlock(&res->spinlock);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->spinlock);
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		list_del_init(&mle->list);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
		spin_unlock(&dlm->spinlock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}

done:
	ret = 0;
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		ret = EAGAIN;  // positive. negative would shoot down the node.
	}
	return ret;

kill:
	/* kill the caller! */
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	dlm_lockres_put(res);
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	dlm_dump_lock_resources(dlm);
	dlm_put(dlm);
	return -EINVAL;
}
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	schedule_work(&dlm->dispatched_work);
	return 0;
}
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if is this just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len,
				   nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		mlog_errno(ret);
	}

	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}
/*
 * DLM_MIGRATE_LOCKRES
 */


int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = -EINVAL;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	struct list_head *queue, *iter;
	int i;
	struct dlm_lock *lock;
	int empty = 1;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "migrating %.*s to %u\n", namelen, name, target);

	/*
	 * ensure this lockres is a proper candidate for migration
	 */
	spin_lock(&res->spinlock);
	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "cannot migrate lockres with unknown owner!\n");
		spin_unlock(&res->spinlock);
		goto leave;
	}
	if (res->owner != dlm->node_num) {
		mlog(0, "cannot migrate lockres this node doesn't own!\n");
		spin_unlock(&res->spinlock);
		goto leave;
	}
	mlog(0, "checking queues...\n");
	queue = &res->granted;
	for (i=0; i<3; i++) {
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			empty = 0;
			if (lock->ml.node == dlm->node_num) {
				mlog(0, "found a lock owned by this node "
				     "still on the %s queue!  will not "
				     "migrate this lockres\n",
				     i==0 ? "granted" :
				     (i==1 ? "converting" : "blocked"));
				spin_unlock(&res->spinlock);
				ret = -ENOTEMPTY;
				goto leave;
			}
		}
		queue++;
	}
	mlog(0, "all locks on this lockres are nonlocal.  continuing\n");
	spin_unlock(&res->spinlock);

	/* no work to do */
	if (empty) {
		mlog(0, "no locks were found on this lockres! done!\n");
		ret = 0;
		goto leave;
	}

	/*
	 * preallocate up front
	 * if this fails, abort
	 */

	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_KERNEL);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * find a node to migrate the lockres to
	 */

	mlog(0, "picking a migration node\n");
	spin_lock(&dlm->spinlock);
	/* pick a new node */
	if (!test_bit(target, dlm->domain_map) ||
	    target >= O2NM_MAX_NODES) {
		target = dlm_pick_migration_target(dlm, res);
	}
	mlog(0, "node %u chosen for migration\n", target);

	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		/* target chosen is not alive */
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock(&dlm->spinlock);
		goto fail;
	}

	mlog(0, "continuing with target = %u\n", target);

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */


	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */
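	/* In other words the final assert, not the lock-state transfer,
	 * is the commit point of a migration; until this node (the old
	 * master) sees it, recovery can still roll the whole thing back.
	 * The wait loop below is a sketch of that handshake from the old
	 * master's side: wake on mle->wq, or time out and re-check
	 * whether the target died. */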
2255 /* wait for new node to assert master */
2257 ret
= wait_event_interruptible_timeout(mle
->wq
,
2258 (atomic_read(&mle
->woken
) == 1),
2259 msecs_to_jiffies(5000));
2262 if (atomic_read(&mle
->woken
) == 1 ||
2263 res
->owner
== target
)
2266 mlog(0, "timed out during migration\n");
2267 /* avoid hang during shutdown when migrating lockres
2268 * to a node which also goes down */
2269 if (dlm_is_node_dead(dlm
, target
)) {
2270 mlog(0, "%s:%.*s: expected migration target %u "
2271 "is no longer up. restarting.\n",
2272 dlm
->name
, res
->lockname
.len
,
2273 res
->lockname
.name
, target
);
2277 if (ret
== -ERESTARTSYS
) {
2278 /* migration failed, detach and clean up mle */
2279 dlm_mle_detach_hb_events(dlm
, mle
);
2281 dlm_put_mle_inuse(mle
);
2282 spin_lock(&res
->spinlock
);
2283 res
->state
&= ~DLM_LOCK_RES_MIGRATING
;
2284 spin_unlock(&res
->spinlock
);
2287 /* TODO: if node died: stop, clean up, return error */
2290 /* all done, set the owner, clear the flag */
2291 spin_lock(&res
->spinlock
);
2292 dlm_set_lockres_owner(dlm
, res
, target
);
2293 res
->state
&= ~DLM_LOCK_RES_MIGRATING
;
2294 dlm_remove_nonlocal_locks(dlm
, res
);
2295 spin_unlock(&res
->spinlock
);
2298 /* master is known, detach if not already detached */
2299 dlm_mle_detach_hb_events(dlm
, mle
);
2300 dlm_put_mle_inuse(mle
);
2303 dlm_lockres_calc_usage(dlm
, res
);
2306 /* re-dirty the lockres if we failed */
2308 dlm_kick_thread(dlm
, res
);
2312 free_page((unsigned long)mres
);
2316 mlog(0, "returning %d\n", ret
);
2319 EXPORT_SYMBOL_GPL(dlm_migrate_lockres
);
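
/*
 * Usage sketch (illustrative only, not a caller that exists in this
 * file): hand a lockres off to a known-alive node and rely on the
 * error paths above to re-dirty it on failure.  Assumes the caller
 * already holds a reference on res; dlm_migrate_lockres() itself
 * rechecks the target against dlm->domain_map as shown above.
 *
 *	ret = dlm_migrate_lockres(dlm, res, target);
 *	if (ret < 0)
 *		mlog(0, "migrate of %.*s to %u failed: %d\n",
 *		     res->lockname.len, res->lockname.name, target, ret);
 */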
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
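
/*
 * Sketch of the intended wait pattern (the real caller lives in the
 * unlock path, not in this file): because both dlm->ast_lock and the
 * lock's spinlock are taken, the predicate is stable and can be used
 * directly with wait_event():
 *
 *	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
 */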
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}
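
/*
 * Note: dlm_migration_can_proceed() is written as a wait_event()
 * predicate; dlm_mark_lockres_migrating() below polls it roughly like
 * this (a sketch of the loop that follows, not additional API):
 *
 *	wait_event_interruptible_timeout(dlm->migration_wq,
 *			dlm_migration_can_proceed(dlm, res, target),
 *			msecs_to_jiffies(1000));
 *
 * A true return can mean either "MIGRATING is set" or "the target
 * died", so the caller must recheck dlm->domain_map under
 * dlm->spinlock before trusting the result.
 */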
int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts.. hang out for a bit */
	dlm_kick_thread(dlm, res);
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}
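
/*
 * The MIGRATING handshake above in one place (an informal sketch of
 * the sequence, matching the code rather than adding to it):
 *
 *	spin_lock(&res->spinlock);
 *	res->migration_pending = 1;
 *	__dlm_lockres_reserve_ast(res);		// hold MIGRATING off
 *	spin_unlock(&res->spinlock);
 *	dlm_kick_thread(dlm, res);		// flush queued asts
 *	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
 *	dlm_lockres_release_ast(dlm, res);	// last release sets MIGRATING
 *
 * Only the release that drops asts_reserved to zero flips the flag,
 * which is what guarantees no ast is in flight once migration starts.
 */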
/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *iter, *iter2;
	struct list_head *queue = &res->granted;
	int i;
	struct dlm_lock *lock;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i=0; i<3; i++) {
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
			}
		}
		queue++;
	}
}
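
/*
 * Note on the loop above: it relies on the granted, converting and
 * blocked list_heads being laid out consecutively in
 * struct dlm_lock_resource, so that queue++ walks all three queues in
 * order.  dlm_pick_migration_target() below uses the same trick.
 */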
/* for now this is not too intelligent.  we will
 * need stats to make this do the right thing.
 * this just finds the first lock on one of the
 * queues and uses that node as the target. */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue = &res->granted;
	struct list_head *iter;
	struct dlm_lock *lock;
	int nodenum;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	for (i=0; i<3; i++) {
		list_for_each(iter, queue) {
			/* up to the caller to make sure this node
			 * is alive */
			lock = list_entry (iter, struct dlm_lock, list);
			if (lock->ml.node != dlm->node_num) {
				spin_unlock(&res->spinlock);
				return lock->ml.node;
			}
		}
		queue++;
	}
	spin_unlock(&res->spinlock);
	mlog(0, "have not found a suitable target yet! checking domain map\n");

	/* ok now we're getting desperate.  pick anyone alive. */
	nodenum = -1;
	while (1) {
		nodenum = find_next_bit(dlm->domain_map,
					O2NM_MAX_NODES, nodenum+1);
		mlog(0, "found %d in domain map\n", nodenum);
		if (nodenum >= O2NM_MAX_NODES)
			break;
		if (nodenum != dlm->node_num) {
			mlog(0, "picking %d\n", nodenum);
			return nodenum;
		}
	}

	mlog(0, "giving up.  no master to migrate to\n");
	return DLM_LOCK_RES_OWNER_UNKNOWN;
}
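
/*
 * Worked example of the fallback scan above: find_next_bit(map, max,
 * off) returns the index of the first set bit at or after 'off', or a
 * value >= 'max' if none remain.  So with nodes 1 and 3 alive and this
 * node being 1, successive iterations yield nodenum = 1 (skipped as
 * self), then 3 (picked), then O2NM_MAX_NODES (loop ends).
 */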
/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0)
			mlog_errno(ret);
		else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}
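
/*
 * Wire format note: struct dlm_migrate_request (defined in
 * dlmcommon.h) carries only what the loop above fills in -- the
 * master/new_master node numbers plus the lock name and its length --
 * so the message is small and fixed-size, and o2net_send_message()
 * returns the remote handler's status separately in 'status'.
 */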
/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	spin_lock(&dlm->master_lock);

	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!\n");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

unlock:
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}
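
/*
 * For context (a sketch; the real registration lives in dlmdomain.c
 * and the arguments there are authoritative): this handler is
 * attached to DLM_MIGRATE_REQUEST_MSG roughly as
 *
 *	o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
 *			       sizeof(struct dlm_migrate_request),
 *			       dlm_migrate_request_handler,
 *			       dlm, &dlm->dlm_domain_handlers);
 */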
/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it from the list so that only one
			 * mle will be found */
			list_del_init(&tmp->list);
			__dlm_mle_detach_hb_events(dlm, mle);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	list_add(&mle->list, &dlm->master_list);

	return ret;
}
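
/*
 * Invariant worth stating once: after dlm_add_migration_mle() returns
 * 0, the migration mle is the *only* mle for this lock name, because
 * any pre-existing block/master mle was given its master and unhashed
 * above before the new entry was added.  Everything that scans
 * dlm->master_list (e.g. dlm_clean_master_list() below) depends on
 * this one-mle-per-name rule.
 */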
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_master_list_entry *mle;
	struct dlm_lock_resource *res;
	unsigned int hash;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	list_for_each_safe(iter, iter2, &dlm->master_list) {
		mle = list_entry(iter, struct dlm_master_list_entry, list);

		BUG_ON(mle->type != DLM_MLE_BLOCK &&
		       mle->type != DLM_MLE_MASTER &&
		       mle->type != DLM_MLE_MIGRATION);

		/* MASTER mles are initiated locally.  the waiting
		 * process will notice the node map change
		 * shortly.  let that happen as normal. */
		if (mle->type == DLM_MLE_MASTER)
			continue;

		/* BLOCK mles are initiated by other nodes.
		 * need to clean up if the dead node would have
		 * been the master. */
		if (mle->type == DLM_MLE_BLOCK) {
			int bit;

			spin_lock(&mle->spinlock);
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (bit != dead_node) {
				mlog(0, "mle found, but dead node %u would "
				     "not have been master\n", dead_node);
				spin_unlock(&mle->spinlock);
			} else {
				/* must drop the refcount by one since the
				 * assert_master will never arrive.  this
				 * may result in the mle being unlinked and
				 * freed, but there may still be a process
				 * waiting in the dlmlock path which is fine. */
				mlog(ML_ERROR, "node %u was expected master\n",
				     dead_node);
				atomic_set(&mle->woken, 1);
				spin_unlock(&mle->spinlock);
				wake_up(&mle->wq);
				/* do not need events any longer, so detach
				 * from heartbeat */
				__dlm_mle_detach_hb_events(dlm, mle);
				__dlm_put_mle(mle);
			}
			continue;
		}

		/* everything else is a MIGRATION mle */

		/* the rule for MIGRATION mles is that the master
		 * becomes UNKNOWN if *either* the original or
		 * the new master dies.  all UNKNOWN lockreses
		 * are sent to whichever node becomes the recovery
		 * master.  the new master is responsible for
		 * determining if there is still a master for
		 * this lockres, or if he needs to take over
		 * mastery.  either way, this node should expect
		 * another message to resolve this. */
		if (mle->master != dead_node &&
		    mle->new_master != dead_node)
			continue;

		/* if we have reached this point, this mle needs to
		 * be removed from the list and freed. */

		/* remove from the list early.  NOTE: unlinking
		 * list_head while in list_for_each_safe */
		__dlm_mle_detach_hb_events(dlm, mle);
		spin_lock(&mle->spinlock);
		list_del_init(&mle->list);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		mlog(0, "node %u died during migration from "
		     "%u to %u!\n", dead_node,
		     mle->master, mle->new_master);
		/* if there is a lockres associated with this
		 * mle, find it and set its owner to UNKNOWN */
		hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
					   mle->u.name.len, hash);
		if (res) {
			/* unfortunately if we hit this rare case, our
			 * lock ordering is messed.  we need to drop
			 * the master lock so that we can take the
			 * lockres lock, meaning that we will have to
			 * restart from the head of list. */
			spin_unlock(&dlm->master_lock);

			/* move lockres onto recovery list */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res,
					      DLM_LOCK_RES_OWNER_UNKNOWN);
			dlm_move_lockres_to_recovery_list(dlm, res);
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);

			/* about to get rid of mle, detach from heartbeat */
			__dlm_mle_detach_hb_events(dlm, mle);

			/* dump the mle */
			spin_lock(&dlm->master_lock);
			__dlm_put_mle(mle);
			spin_unlock(&dlm->master_lock);

			/* restart */
			goto top;
		}

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}
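
/*
 * Pattern note: the goto-top restart in dlm_clean_master_list() is the
 * standard way out of an inverted lock order.  In sketch form:
 *
 *	spin_unlock(&dlm->master_lock);	// drop the inner lock
 *	spin_lock(&res->spinlock);	// take the lock we must not
 *	...				// nest under master_lock
 *	spin_unlock(&res->spinlock);
 *	spin_lock(&dlm->master_lock);	// reacquire and rescan
 *	goto top;
 *
 * The list may have changed while master_lock was dropped, so the
 * rescan from the head is mandatory, not an optimization.
 */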
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}
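
/*
 * Why two assert_master rounds above: the first round tells every node
 * except the old master that this node now owns the lockres; only once
 * they have all been contacted is the old master told, so it keeps
 * answering for the lockres until no other node can still believe it
 * is the owner.  Both rounds tolerate dead nodes, hence the ret = 0
 * cases.
 */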
/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}
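
/*
 * Pairing sketch (illustrative of the contract, not new code): every
 * reservation must be matched by exactly one release, bracketing the
 * window in which an ast might be fired:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...queue and deliver the ast/bast...
 *	dlm_lockres_release_ast(dlm, res);
 */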
/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}
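
/*
 * A note on atomic_dec_and_lock() above: it decrements
 * res->asts_reserved and returns nonzero -- with res->spinlock held --
 * only when the count reaches zero.  That makes "last release sets
 * MIGRATING" race-free without taking the spinlock on every release.
 */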