/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	struct dlm_lock_resource *res;

	if (dlm != mle->dlm)
		return 0;

	if (mle->type == DLM_MLE_BLOCK ||
	    mle->type == DLM_MLE_MIGRATION) {
		if (namelen != mle->u.name.len ||
		    memcmp(name, mle->u.name.name, namelen) != 0)
			return 0;
	} else {
		res = mle->u.res;
		if (namelen != res->lockname.len ||
		    memcmp(res->lockname.name, name, namelen) != 0)
			return 0;
	}
	return 1;
}
static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;
static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EBADF:
		case -ECONNREFUSED:
		case -ENOTCONN:
		case -ECONNRESET:
		case -EPIPE:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ETIMEDOUT:
		case -ECONNABORTED:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -ENETRESET:
		case -ESHUTDOWN:
		case -ENOPROTOOPT:
		case -EINVAL:	/* if returned from our tcp code,
				   this means there is no socket */
			return 1;
	}
	return 0;
}
/*
 * MASTER LIST FUNCTIONS
 */

/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}

static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}

static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
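
/* a rough sketch of the mle/heartbeat lifecycle implemented by the
 * helpers above: dlm_init_mle() attaches the mle while dlm->spinlock
 * is already held; once a master "answer" arrives, callers detach via
 * dlm_mle_detach_hb_events() (which takes dlm->spinlock itself) or
 * via the __ variant when the lock is held; and dlm_mle_release()
 * detaches one final time before the mle is freed, so a missed detach
 * cannot leave a stale entry on dlm->mle_hb_events. */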
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}

/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}
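
/* refcounting rules above, roughly: dlm_get_mle()/dlm_put_mle() manage
 * the plain kref; the _inuse variants additionally bump mle->inuse so
 * that dlm_assert_master_handler() can tell how many references a
 * still-in-use mle is expected to hold.  a typical caller pairs
 * dlm_get_mle_inuse() when mastery begins with dlm_put_mle_inuse()
 * once the master is known (see dlm_get_lock_resource() below). */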
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_LIST_HEAD(&mle->list);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->u.res = res;
	} else if (mle->type == DLM_MLE_BLOCK) {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	} else /* DLM_MLE_MIGRATION */ {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	}

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;

	assert_spin_locked(&dlm->master_lock);

	list_for_each_entry(tmpmle, &dlm->master_list, list) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}
static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	if (mle->type != DLM_MLE_MASTER) {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.name.len, mle->u.name.name, mle->type);
	} else {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.res->lockname.len,
		     mle->u.res->lockname.name, mle->type);
	}
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* remove from list if not already */
	if (!list_empty(&mle->list))
		list_del_init(&mle->list);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache)
		kmem_cache_destroy(dlm_lockname_cache);

	if (dlm_lockres_cache)
		kmem_cache_destroy(dlm_lockres_cache);
}
static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 owner)
{
	assert_spin_locked(&res->spinlock);

	mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

	if (owner == dlm->node_num)
		atomic_inc(&dlm->local_resources);
	else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_inc(&dlm->unknown_resources);
	else
		atomic_inc(&dlm->remote_resources);

	res->owner = owner;
}

void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res, u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner == res->owner)
		return;

	if (res->owner == dlm->node_num)
		atomic_dec(&dlm->local_resources);
	else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_dec(&dlm->unknown_resources);
	else
		atomic_dec(&dlm->remote_resources);

	dlm_set_lockres_owner(dlm, res, owner);
}
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;

	/* put in dlm_lockres_release */
	dlm_grab(dlm);
	res->dlm = dlm;

	kref_init(&res->refs);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = (struct dlm_lock_resource *)
		kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = (char *)
		kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res && res->lockname.name)
		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres,
				     const char *file,
				     int line)
{
	if (!new_lockres)
		assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!test_bit(dlm->node_num, res->refmap)) {
		BUG_ON(res->inflight_locks != 0);
		dlm_lockres_set_refmap_bit(dlm->node_num, res);
	}
	res->inflight_locks++;
	mlog(0, "%s:%.*s: inflight++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file,
				     int line)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);
	res->inflight_locks--;
	mlog(0, "%s:%.*s: inflight--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
	if (res->inflight_locks == 0)
		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
	wake_up(&res->wq);
}
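
/* inflight reference semantics, roughly: the first local user of a
 * lockres sets this node's bit in res->refmap, and every user bumps
 * res->inflight_locks; when the count falls back to zero the refmap
 * bit is cleared again, which is what makes the lockres eligible for
 * purging.  e.g. two concurrent users leave inflight_locks == 2 with
 * the refmap bit set exactly once. */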
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here. need to wait around for that node
 * to assert_master (or die).
 */
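
/* the flow below, in short:
 *   1) look the lockres up in the hash; if found, wait out any
 *      in-progress mastery or DROPPING_REF state and return it.
 *   2) otherwise allocate a lockres and a spare mle, then redo the
 *      lookup in case another thread raced us.
 *   3) if a BLOCK or MIGRATION mle already exists, another node got
 *      there first: back off and retry from the top.
 *   4) otherwise insert a MASTER mle, send master requests to every
 *      node in the vote map, and sit in dlm_wait_for_lock_mastery()
 *      until an owner is known (possibly asserting mastery here). */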
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres = NULL, *res = NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;
	int drop_inflight_if_nonlocal = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		int dropping_ref = 0;

		spin_unlock(&dlm->spinlock);

		spin_lock(&tmpres->spinlock);
		/* We wait for the other thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
		}

		if (tmpres->owner == dlm->node_num) {
			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
			dlm_lockres_grab_inflight_ref(dlm, tmpres);
		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
			dropping_ref = 1;
		spin_unlock(&tmpres->spinlock);

		/* wait until done messaging the master, drop our ref to allow
		 * the lockres to be purged, start over. */
		if (dropping_ref) {
			spin_lock(&tmpres->spinlock);
			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = (struct dlm_master_list_entry *)
			kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ? "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* since this lockres is new it doesn't require the spinlock */
	dlm_lockres_grab_inflight_ref_new(dlm, res);

	/* if this node does not become the master make sure to drop
	 * this inflight reference below */
	drop_inflight_if_nonlocal = 1;

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
		dlm_lockres_drop_inflight_ref(dlm, res);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
#define DLM_MASTERY_TIMEOUT_MS   5000
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
			ret = 0;
		}
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			atomic_read(&mle->mle_refs.refcount),
			res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}
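
/* a small worked example of the iterator above: with orig_bm = {1,3}
 * and cur_bm = {1,4}, diff_bm holds {3,4}; the first _next() call
 * returns 3 with *state == NODE_DOWN (set before, clear now), the
 * second returns 4 with *state == NODE_UP, and the third returns
 * -ENOENT.  node 1, unchanged, is never visited. */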
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
							   O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					     " while this node was blocked "
					     "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							       O2NM_MAX_NODES,
							       lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->u.res = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 */
static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response = 0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	if (mle->type != DLM_MLE_MASTER) {
		request.namelen = mle->u.name.len;
		memcpy(request.name, mle->u.name.name, request.namelen);
	} else {
		request.namelen = mle->u.res->lockname.len;
		memcpy(request.name, mle->u.res->lockname.name,
		       request.namelen);
	}

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0) {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mlog(0, "%s:%.*s: master node %u now knows I have a "
			     "reference\n", dlm->name, res->lockname.len,
			     res->lockname.name, to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			response = 0;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
			     dlm->name, namelen, name, request->node_idx);
			dlm_lockres_set_refmap_bit(request->node_idx, res);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(request->node_idx, res);
				mlog(0, "%s:%.*s: setting bit %u in refmap\n",
				     dlm->name, namelen, name,
				     request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = (struct dlm_master_list_entry *)
				kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			dlm_lockres_put(res);
		}
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */

/*
 * NOTE: this can be used for debugging.
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(0, "assert_master returned %d!\n", tmpret);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR, "during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(to, res);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}
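
/* the status returned by the assert_master handler is a bitmask, not
 * an errno: DLM_ASSERT_RESPONSE_REASSERT asks for another pass above
 * (the target had created mles on other nodes), while
 * DLM_ASSERT_RESPONSE_MASTERY_REF means the target holds a reference
 * to this lockres and its bit must be set in res->refmap.  a negative
 * status means the target considers the assert bogus, which is fatal
 * on the asserting node (see the BUG() above). */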
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0, have_lockres_ref = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
				     "but current owner is %u! (%.*s)\n",
				     assert->node_idx, res->owner, namelen,
				     name);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx, namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	//	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
						   nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx)
					master_request = 1;
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			int wake = 0;
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else
				dlm_change_lockres_owner(dlm, res, mle->master);
			spin_unlock(&res->spinlock);
			have_lockres_ref = 1;
			if (wake)
				wake_up(&res->wq);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->spinlock);
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		list_del_init(&mle->list);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
		spin_unlock(&dlm->spinlock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}

done:
	ret = 0;
	if (res) {
		spin_lock(&res->spinlock);
		res->state |= DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		*ret_data = (void *)res;
	}
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		/* positive. negative would shoot down the node. */
		ret |= DLM_ASSERT_RESPONSE_REASSERT;
		if (!have_lockres_ref) {
			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
			     "mle present here for %s:%.*s, but no lockres!\n",
			     assert->node_idx, dlm->name, namelen, name);
		}
	}
	if (have_lockres_ref) {
		/* let the master know we have a reference to the lockres */
		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
		     dlm->name, namelen, name, assert->node_idx);
	}
	return ret;

kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	*ret_data = (void *)res;
	dlm_put(dlm);
	return -EINVAL;
}
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

	if (ret_data) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
	return;
}
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if this is just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
			ret = 0;
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}
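/*
 * The wait-and-recheck described above is driven by the caller
 * (dlm_get_lock_resource).  A rough sketch of the intended use, with
 * the delay value chosen purely for illustration:
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res))
 *		msleep(100);
 *
 * where the sleep gives the remote node's heartbeat stack time to
 * notice the death and purge the stale $RECOVERY master.
 */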
/*
 * DLM_DEREF_LOCKRES_MSG
 */

int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	struct dlm_deref_lockres deref;
	int ret = 0, r = 0;
	const char *lockname;
	unsigned int namelen;

	lockname = res->lockname.name;
	namelen = res->lockname.len;
	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	mlog(0, "%s:%.*s: sending deref to %d\n",
	     dlm->name, namelen, lockname, res->owner);
	memset(&deref, 0, sizeof(deref));
	deref.node_idx = dlm->node_num;
	deref.namelen = namelen;
	memcpy(deref.name, lockname, namelen);

	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog_errno(ret);
	else if (r < 0) {
		/* BAD.  other node says I did not have a ref. */
		mlog(ML_ERROR, "while dropping ref on %s:%.*s "
		     "(master=%u) got %d.\n", dlm->name, namelen,
		     lockname, res->owner, r);
		dlm_print_one_lock_resource(res);
		BUG();
	}
	return ret;
}
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen;
	int ret = -EINVAL;
	u8 node;
	unsigned int hash;
	struct dlm_work_item *item;
	int cleared = 0;
	int dispatch = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = deref->name;
	namelen = deref->namelen;
	node = deref->node_idx;

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}
	if (deref->node_idx >= O2NM_MAX_NODES) {
		mlog(ML_ERROR, "Invalid node number: %u\n", node);
		goto done;
	}

	hash = dlm_lockid_hash(name, namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
		     dlm->name, namelen, name);
		goto done;
	}
	spin_unlock(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
		dispatch = 1;
	else {
		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
		if (test_bit(node, res->refmap)) {
			dlm_lockres_clear_refmap_bit(node, res);
			cleared = 1;
		}
	}
	spin_unlock(&res->spinlock);

	if (!dispatch) {
		if (cleared)
			dlm_lockres_calc_usage(dlm, res);
		else {
			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
			     "but it is already dropped!\n", dlm->name,
			     res->lockname.len, res->lockname.name, node);
			dlm_print_one_lock_resource(res);
		}
		ret = 0;
		goto done;
	}

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto done;
	}

	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
	item->u.dl.deref_res = res;
	item->u.dl.deref_node = node;

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;

done:
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);

	return ret;
}
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_lock_resource *res;
	u8 node;
	u8 cleared = 0;

	dlm = item->dlm;
	res = item->u.dl.deref_res;
	node = item->u.dl.deref_node;

	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
	if (test_bit(node, res->refmap)) {
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		dlm_lockres_clear_refmap_bit(node, res);
		cleared = 1;
	}
	spin_unlock(&res->spinlock);

	if (cleared) {
		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
		     dlm->name, res->lockname.len, res->lockname.name, node);
		dlm_lockres_calc_usage(dlm, res);
	} else {
		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
		     "but it is already dropped!\n", dlm->name,
		     res->lockname.len, res->lockname.name, node);
		dlm_print_one_lock_resource(res);
	}

	dlm_lockres_put(res);
}
/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
 * if not. If 0, numlocks is set to the number of locks in the lockres.
 */
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      int *numlocks)
{
	int ret;
	int i;
	int count = 0;
	struct list_head *queue;
	struct dlm_lock *lock;

	assert_spin_locked(&res->spinlock);

	ret = -EINVAL;
	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "cannot migrate lockres with unknown owner!\n");
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		mlog(0, "cannot migrate lockres this node doesn't own!\n");
		goto leave;
	}

	ret = 0;
	queue = &res->granted;
	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			++count;
			if (lock->ml.node == dlm->node_num) {
				mlog(0, "found a lock owned by this node still "
				     "on the %s queue!  will not migrate this "
				     "lockres\n", (i == 0 ? "granted" :
						   (i == 1 ? "converting" :
						    "blocked")));
				ret = -ENOTEMPTY;
				goto leave;
			}
		}
		queue++;
	}

	*numlocks = count;
	mlog(0, "migrateable lockres having %d locks\n", *numlocks);

leave:
	return ret;
}
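/*
 * Both callers of dlm_is_lockres_migrateable() in this file
 * (dlm_migrate_lockres and dlm_empty_lockres) follow the same shape:
 * take res->spinlock, run the check, then decide whether migration is
 * worth attempting at all:
 *
 *	spin_lock(&res->spinlock);
 *	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
 *	spin_unlock(&res->spinlock);
 *	if (ret < 0 || numlocks == 0)
 *		... skip the migration ...
 */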
/*
 * DLM_MIGRATE_LOCKRES
 */

static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = 0;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	int numlocks;
	int wake = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "migrating %.*s to %u\n", namelen, name, target);

	/*
	 * ensure this lockres is a proper candidate for migration
	 */
	spin_lock(&res->spinlock);
	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
	if (ret < 0) {
		spin_unlock(&res->spinlock);
		goto leave;
	}
	spin_unlock(&res->spinlock);

	/* no work to do */
	if (numlocks == 0) {
		mlog(0, "no locks were found on this lockres! done!\n");
		goto leave;
	}

	/*
	 * preallocate up front
	 * if this fails, abort
	 */

	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * find a node to migrate the lockres to
	 */

	mlog(0, "picking a migration node\n");
	spin_lock(&dlm->spinlock);
	/* pick a new node */
	if (!test_bit(target, dlm->domain_map) ||
	    target >= O2NM_MAX_NODES) {
		target = dlm_pick_migration_target(dlm, res);
	}
	mlog(0, "node %u chosen for migration\n", target);

	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		/* target chosen is not alive */
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock(&dlm->spinlock);
		goto fail;
	}

	mlog(0, "continuing with target = %u\n", target);

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* now that remote nodes are spinning on the MIGRATING flag,
	 * ensure that all assert_master work is flushed. */
	flush_workqueue(dlm->dlm_worker);

	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */

	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "%s:%.*s: timed out during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -EINVAL;
				/* migration failed, detach and clean up mle */
				dlm_mle_detach_hb_events(dlm, mle);
				dlm_put_mle(mle);
				dlm_put_mle_inuse(mle);
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				spin_unlock(&res->spinlock);
				goto leave;
			}
		} else
			mlog(0, "%s:%.*s: caught signal during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* wake up waiters if the MIGRATING flag got set
	 * but migration failed */
	if (wake)
		wake_up(&res->wq);

	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "returning %d\n", ret);
	return ret;
}
#define DLM_MIGRATION_RETRY_MS	100

/* Should be called only after beginning the domain leave process.
 * There should not be any remaining locks on nonlocal lock resources,
 * and there should be no local locks left on locally mastered resources.
 *
 * Called with the dlm spinlock held, may drop it to do migration, but
 * will re-acquire before exit.
 *
 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	int lock_dropped = 0;
	int numlocks;

	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num) {
		if (!__dlm_lockres_unused(res)) {
			mlog(ML_ERROR, "%s:%.*s: this node is not master, "
			     "trying to free this but locks remain\n",
			     dlm->name, res->lockname.len, res->lockname.name);
		}
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* No need to migrate a lockres having no locks */
	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
	if (ret >= 0 && numlocks == 0) {
		spin_unlock(&res->spinlock);
		goto leave;
	}
	spin_unlock(&res->spinlock);

	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
	spin_unlock(&dlm->spinlock);
	lock_dropped = 1;
	while (1) {
		ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
		if (ret >= 0)
			break;
		if (ret == -ENOTEMPTY) {
			mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
			     res->lockname.len, res->lockname.name);
			BUG();
		}

		mlog(0, "lockres %.*s: migrate failed, "
		     "retrying\n", res->lockname.len,
		     res->lockname.name);
		msleep(DLM_MIGRATION_RETRY_MS);
	}
	spin_lock(&dlm->spinlock);
leave:
	return lock_dropped;
}
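/*
 * Sketch of the caller's contract (modelled on the shutdown-time hash
 * walk in dlmdomain.c): dlm->spinlock is held across the call, and a
 * nonzero return means the walk of the current hash bucket must restart
 * because the spinlock was dropped and retaken:
 *
 *	spin_lock(&dlm->spinlock);
 *	...
 *	if (dlm_empty_lockres(dlm, res))
 *		goto restart_bucket;
 *	...
 *	spin_unlock(&dlm->spinlock);
 *
 * (restart_bucket is an illustrative label, not one from this file.)
 */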
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
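/*
 * dlm_lock_basts_flushed() is written as a wait_event() predicate: the
 * unlock path elsewhere in the dlm blocks until any queued bast for the
 * lock has drained, roughly:
 *
 *	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
 */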
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}
static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts */
	dlm_kick_thread(dlm, res);
	/* before waiting on DIRTY, block processes which may
	 * try to dirty the lockres before MIGRATING is set */
	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);
	/* now wait on any pending asts and the DIRTY state */
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	/* now that we are sure the MIGRATING state is there, drop
	 * the unneeded state which blocked threads trying to DIRTY */
	spin_lock(&res->spinlock);
	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
	BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);

	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}
/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *queue = &res->granted;
	int i, bit;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i = 0; i < 3; i++) {
		list_for_each_entry_safe(lock, next, queue, list) {
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				dlm_lockres_clear_refmap_bit(lock->ml.node,
							     res);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
				/* In a normal unlock, we would have added a
				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
				dlm_lock_put(lock);
			}
		}
		queue++;
	}
	bit = 0;
	while (1) {
		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
		if (bit >= O2NM_MAX_NODES)
			break;
		/* do not clear the local node reference, if there is a
		 * process holding this, let it drop the ref itself */
		if (bit != dlm->node_num) {
			mlog(0, "%s:%.*s: node %u had a ref to this "
			     "migrating lockres, clearing\n", dlm->name,
			     res->lockname.len, res->lockname.name, bit);
			dlm_lockres_clear_refmap_bit(bit, res);
		}
		bit++;
	}
}
/* for now this is not too intelligent.  we will
 * need stats to make this do the right thing.
 * this just finds the first lock on one of the
 * queues and uses that node as the target. */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	int nodenum;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			/* up to the caller to make sure this node
			 * is alive */
			if (lock->ml.node != dlm->node_num) {
				spin_unlock(&res->spinlock);
				return lock->ml.node;
			}
		}
		queue++;
	}
	spin_unlock(&res->spinlock);
	mlog(0, "have not found a suitable target yet! checking domain map\n");

	/* ok now we're getting desperate.  pick anyone alive. */
	nodenum = -1;
	while (1) {
		nodenum = find_next_bit(dlm->domain_map,
					O2NM_MAX_NODES, nodenum + 1);
		mlog(0, "found %d in domain map\n", nodenum);
		if (nodenum >= O2NM_MAX_NODES)
			break;
		if (nodenum != dlm->node_num) {
			mlog(0, "picking %d\n", nodenum);
			return nodenum;
		}
	}

	mlog(0, "giving up.  no master to migrate to\n");
	return DLM_LOCK_RES_OWNER_UNKNOWN;
}
/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, skip, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		/* We could race exit domain. If exited, skip. */
		spin_lock(&dlm->spinlock);
		skip = (!test_bit(nodenum, dlm->domain_map));
		spin_unlock(&dlm->spinlock);
		if (skip) {
			clear_bit(nodenum, iter->node_map);
			continue;
		}

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0) {
			mlog(0, "migrate_request returned %d!\n", ret);
			if (!dlm_is_host_down(ret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
				BUG();
			}
			clear_bit(nodenum, iter->node_map);
			ret = 0;
		} else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
			/* during the migration request we short-circuited
			 * the mastery of the lockres.  make sure we have
			 * a mastery ref for nodenum */
			mlog(0, "%s:%.*s: need ref for node %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     nodenum);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(nodenum, res);
			spin_unlock(&res->spinlock);
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}
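/*
 * The fan-out above expects the caller to prepare a dlm_node_iter over
 * dlm->domain_map with the two migration principals masked out, exactly
 * as dlm_finish_migration() does below:
 *
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	clear_bit(old_master, iter.node_map);
 *	clear_bit(dlm->node_num, iter.node_map);
 *	ret = dlm_do_migrate_request(dlm, res, old_master,
 *				     dlm->node_num, &iter);
 */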
/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	spin_lock(&dlm->master_lock);

	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

unlock:
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}
/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error  mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it from the list so that only one
			 * mle will be found */
			list_del_init(&tmp->list);
			/* note: the old code wrongly passed the uninited
			 * mle here; it must be tmp. */
			__dlm_mle_detach_hb_events(dlm, tmp);
			ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
			mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
			     "telling master to get ref for cleared out mle "
			     "during migration\n", dlm->name, namelen, name,
			     master, new_master);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	/* the new master will be sending an assert master for this.
	 * at that point we will get the refmap reference */
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	list_add(&mle->list, &dlm->master_list);

	return ret;
}
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle, *next;
	struct dlm_lock_resource *res;
	unsigned int hash;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
		BUG_ON(mle->type != DLM_MLE_BLOCK &&
		       mle->type != DLM_MLE_MASTER &&
		       mle->type != DLM_MLE_MIGRATION);

		/* MASTER mles are initiated locally.  the waiting
		 * process will notice the node map change
		 * shortly.  let that happen as normal. */
		if (mle->type == DLM_MLE_MASTER)
			continue;

		/* BLOCK mles are initiated by other nodes.
		 * need to clean up if the dead node would have
		 * been the master. */
		if (mle->type == DLM_MLE_BLOCK) {
			int bit;

			spin_lock(&mle->spinlock);
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (bit != dead_node) {
				mlog(0, "mle found, but dead node %u would "
				     "not have been master\n", dead_node);
				spin_unlock(&mle->spinlock);
			} else {
				/* must drop the refcount by one since the
				 * assert_master will never arrive.  this
				 * may result in the mle being unlinked and
				 * freed, but there may still be a process
				 * waiting in the dlmlock path which is fine. */
				mlog(0, "node %u was expected master\n",
				     dead_node);
				atomic_set(&mle->woken, 1);
				spin_unlock(&mle->spinlock);
				wake_up(&mle->wq);
				/* do not need events any longer, so detach
				 * from heartbeat */
				__dlm_mle_detach_hb_events(dlm, mle);
				__dlm_put_mle(mle);
			}
			continue;
		}

		/* everything else is a MIGRATION mle */

		/* the rule for MIGRATION mles is that the master
		 * becomes UNKNOWN if *either* the original or
		 * the new master dies.  all UNKNOWN lockreses
		 * are sent to whichever node becomes the recovery
		 * master.  the new master is responsible for
		 * determining if there is still a master for
		 * this lockres, or if he needs to take over
		 * mastery.  either way, this node should expect
		 * another message to resolve this. */
		if (mle->master != dead_node &&
		    mle->new_master != dead_node)
			continue;

		/* if we have reached this point, this mle needs to
		 * be removed from the list and freed. */

		/* remove from the list early.  NOTE: unlinking
		 * list_head while in list_for_each_safe */
		__dlm_mle_detach_hb_events(dlm, mle);
		spin_lock(&mle->spinlock);
		list_del_init(&mle->list);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		mlog(0, "%s: node %u died during migration from "
		     "%u to %u!\n", dlm->name, dead_node,
		     mle->master, mle->new_master);
		/* if there is a lockres associated with this
		 * mle, find it and set its owner to UNKNOWN */
		hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
					   mle->u.name.len, hash);
		if (res) {
			/* unfortunately if we hit this rare case, our
			 * lock ordering is messed.  we need to drop
			 * the master lock so that we can take the
			 * lockres lock, meaning that we will have to
			 * restart from the head of list. */
			spin_unlock(&dlm->master_lock);

			/* move lockres onto recovery list */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res,
					      DLM_LOCK_RES_OWNER_UNKNOWN);
			dlm_move_lockres_to_recovery_list(dlm, res);
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);

			/* about to get rid of mle, detach from heartbeat */
			__dlm_mle_detach_hb_events(dlm, mle);

			/* dump the mle */
			spin_lock(&dlm->master_lock);
			__dlm_put_mle(mle);
			spin_unlock(&dlm->master_lock);

			/* restart */
			goto top;
		}

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	/* ownership of the lockres is changing.  account for the
	 * mastery reference here since old_master will briefly have
	 * a reference after the migration completes */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(old_master, res);
	spin_unlock(&res->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}
/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}
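/*
 * Usage sketch of the reserve/release pair, condensed from
 * dlm_assert_master_worker() above: reserve under res->spinlock to hold
 * off migration, do the work, then release; the release may be the
 * final one that flips DLM_LOCK_RES_MIGRATING on:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
 *	dlm_lockres_release_ast(dlm, res);
 */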