/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	if (dlm != mle->dlm)
		return 0;

	if (namelen != mle->mnamelen ||
	    memcmp(name, mle->mname, namelen) != 0)
		return 0;

	return 1;
}

static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

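/*
 * Classify an errno returned by our networking code: nonzero means the
 * error indicates that the remote node is dead or unreachable, so the
 * caller should treat the target as down rather than retry.
 */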
int dlm_is_host_down(int errno)
{
	switch (errno) {
	case -EBADF:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ETIMEDOUT:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -ENETRESET:
	case -ESHUTDOWN:
	case -ENOPROTOOPT:
	case -EINVAL:	/* if returned from our tcp code,
			   this means there is no socket */
		return 1;
	}
	return 0;
}


/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}

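/*
 * An mle that is in active use for a message round-trip is additionally
 * pinned with the inuse count; dlm_get_mle_inuse() takes a kref along
 * with it, and dlm_put_mle_inuse() drops both again.
 */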
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}

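/*
 * Initialize a new mle.  Both node_map and vote_map start as copies of
 * the current domain map, with the local node cleared: node_map tracks
 * heartbeat up/down events, vote_map tracks which remote nodes still
 * need to answer our master request.
 */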
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->mleres = res;
		memcpy(mle->mname, res->lockname.name, res->lockname.len);
		mle->mnamelen = res->lockname.len;
		mle->mnamehash = res->lockname.hash;
	} else {
		BUG_ON(!name);
		mle->mleres = NULL;
		memcpy(mle->mname, name, namelen);
		mle->mnamelen = namelen;
		mle->mnamehash = dlm_lockid_hash(name, namelen);
	}

	atomic_inc(&dlm->mle_tot_count[mle->type]);
	atomic_inc(&dlm->mle_cur_count[mle->type]);

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	if (!hlist_unhashed(&mle->master_hash_node))
		hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, mle->mnamehash);
	hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;
	struct hlist_node *list;
	unsigned int hash;

	assert_spin_locked(&dlm->master_lock);

	hash = dlm_lockid_hash(name, namelen);
	bucket = dlm_master_hash(dlm, hash);
	hlist_for_each(list, bucket) {
		tmpmle = hlist_entry(list, struct dlm_master_list_entry,
				     master_hash_node);
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}

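/*
 * Fan a heartbeat node up/down event out to every mle currently
 * attached to dlm->mle_hb_events.  Runs under dlm->spinlock from the
 * domain's heartbeat callbacks.
 */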
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}


int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
	     mle->type);

	/* remove from list if not already */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}


/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache)
		kmem_cache_destroy(dlm_lockname_cache);

	if (dlm_lockres_cache)
		kmem_cache_destroy(dlm_lockres_cache);
}

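/*
 * Final kref release for a lock resource.  By this point the lockres
 * must be off the hash and every lock/tracking list; anything else is
 * a bug loud enough to print the resource and BUG().
 */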
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	atomic_dec(&dlm->res_cur_count);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;

	res->dlm = dlm;

	kref_init(&res->refs);

	atomic_inc(&dlm->res_tot_count);
	atomic_inc(&dlm->res_cur_count);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res && res->lockname.name)
		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}

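/*
 * Inflight references track lock operations that are still in progress
 * against this lockres.  While any are outstanding, the local node's
 * bit is kept set in the refmap; it is cleared again when the last
 * inflight reference is dropped.
 */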
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres,
				     const char *file,
				     int line)
{
	if (!new_lockres)
		assert_spin_locked(&res->spinlock);

	if (!test_bit(dlm->node_num, res->refmap)) {
		BUG_ON(res->inflight_locks != 0);
		dlm_lockres_set_refmap_bit(dlm->node_num, res);
	}
	res->inflight_locks++;
	mlog(0, "%s:%.*s: inflight++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file,
				     int line)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);
	res->inflight_locks--;
	mlog(0, "%s:%.*s: inflight--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
	if (res->inflight_locks == 0)
		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
	wake_up(&res->wq);
}

/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource *dlm_get_lock_resource(struct dlm_ctxt *dlm,
						const char *lockid,
						int namelen,
						int flags)
{
	struct dlm_lock_resource *tmpres = NULL, *res = NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;
	int drop_inflight_if_nonlocal = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		int dropping_ref = 0;

		spin_unlock(&dlm->spinlock);

		spin_lock(&tmpres->spinlock);
		/* We wait for the other thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
		}

		if (tmpres->owner == dlm->node_num) {
			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
			dlm_lockres_grab_inflight_ref(dlm, tmpres);
		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
			dropping_ref = 1;
		spin_unlock(&tmpres->spinlock);

		/* wait until done messaging the master, drop our ref to allow
		 * the lockres to be purged, start over. */
		if (dropping_ref) {
			spin_lock(&tmpres->spinlock);
			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ? "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			if (mig)
				msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* since this lockres is new it does not require the spinlock */
	dlm_lockres_grab_inflight_ref_new(dlm, res);

	/* if this node does not become the master make sure to drop
	 * this inflight reference below */
	drop_inflight_if_nonlocal = 1;

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
		dlm_lockres_drop_inflight_ref(dlm, res);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}


#define DLM_MASTERY_TIMEOUT_MS   5000

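/*
 * Wait for mastery of a lockres to resolve: loop until either some node
 * asserts master, or all nodes in vote_map have responded and this node
 * holds the lowest set bit in maybe_map, in which case it asserts
 * master itself.  Any change in the node map restarts the vote.
 */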
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			     atomic_read(&mle->mle_refs.refcount),
			     res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}

struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

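/*
 * Iterate over the bits that differ between two node bitmaps (the XOR
 * of the two), reporting for each changed node whether it came up or
 * went down relative to the original map.
 */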
static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}


static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
							   O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					     " while this node was blocked "
					     "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							       O2NM_MAX_NODES,
							       lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->mleres = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}


/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response = 0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	request.namelen = (u8)mle->mnamelen;
	memcpy(request.name, mle->mname, request.namelen);

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0) {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!\n");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
	case DLM_MASTER_RESP_YES:
		set_bit(to, mle->response_map);
		mlog(0, "node %u is the master, response=YES\n", to);
		mlog(0, "%s:%.*s: master node %u now knows I have a "
		     "reference\n", dlm->name, res->lockname.len,
		     res->lockname.name, to);
		mle->master = to;
		break;
	case DLM_MASTER_RESP_NO:
		mlog(0, "node %u not master, response=NO\n", to);
		set_bit(to, mle->response_map);
		break;
	case DLM_MASTER_RESP_MAYBE:
		mlog(0, "node %u not master, response=MAYBE\n", to);
		set_bit(to, mle->response_map);
		set_bit(to, mle->maybe_map);
		break;
	case DLM_MASTER_RESP_ERROR:
		mlog(0, "node %u hit an error, resending\n", to);
		resend = 1;
		response = 0;
		break;
	default:
		mlog(ML_ERROR, "bad response! %u\n", response);
		BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}

/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
			     dlm->name, namelen, name, request->node_idx);
			dlm_lockres_set_refmap_bit(request->node_idx, res);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(request->node_idx, res);
				mlog(0, "%s:%.*s: setting bit %u in refmap\n",
				     dlm->name, namelen, name,
				     request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			dlm_lockres_put(res);
		}
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}

/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something is horribly messed up. kill thyself. */
			mlog(ML_ERROR, "during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u created mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
1698 reassert = 1;
1699 }
1700 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1701 mlog(0, "%.*s: node %u has a reference to this "
1702 "lockres, set the bit in the refmap\n",
1703 namelen, lockname, to);
1704 spin_lock(&res->spinlock);
1705 dlm_lockres_set_refmap_bit(to, res);
1706 spin_unlock(&res->spinlock);
1707 }
1708 }
1709
1710 if (reassert)
1711 goto again;
1712
1713 spin_lock(&res->spinlock);
1714 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1715 spin_unlock(&res->spinlock);
1716 wake_up(&res->wq);
1717
1718 return ret;
1719 }
1720
1721 /*
1722 * locks that can be taken here:
1723 * dlm->spinlock
1724 * res->spinlock
1725 * mle->spinlock
1726 * dlm->master_list
1727 *
1728 * if possible, TRIM THIS DOWN!!!
1729 */
1730 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1731 void **ret_data)
1732 {
1733 struct dlm_ctxt *dlm = data;
1734 struct dlm_master_list_entry *mle = NULL;
1735 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1736 struct dlm_lock_resource *res = NULL;
1737 char *name;
1738 unsigned int namelen, hash;
1739 u32 flags;
1740 int master_request = 0, have_lockres_ref = 0;
1741 int ret = 0;
1742
1743 if (!dlm_grab(dlm))
1744 return 0;
1745
1746 name = assert->name;
1747 namelen = assert->namelen;
1748 hash = dlm_lockid_hash(name, namelen);
1749 flags = be32_to_cpu(assert->flags);
1750
1751 if (namelen > DLM_LOCKID_NAME_MAX) {
1752 mlog(ML_ERROR, "Invalid name length!");
1753 goto done;
1754 }
1755
1756 spin_lock(&dlm->spinlock);
1757
1758 if (flags)
1759 mlog(0, "assert_master with flags: %u\n", flags);
1760
1761 /* find the MLE */
1762 spin_lock(&dlm->master_lock);
1763 if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1764 /* not an error, could be master just re-asserting */
1765 mlog(0, "just got an assert_master from %u, but no "
1766 "MLE for it! (%.*s)\n", assert->node_idx,
1767 namelen, name);
1768 } else {
1769 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1770 if (bit >= O2NM_MAX_NODES) {
1771 /* not necessarily an error, though less likely.
1772 * could be master just re-asserting. */
1773 mlog(0, "no bits set in the maybe_map, but %u "
1774 "is asserting! (%.*s)\n", assert->node_idx,
1775 namelen, name);
1776 } else if (bit != assert->node_idx) {
1777 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1778 mlog(0, "master %u was found, %u should "
1779 "back off\n", assert->node_idx, bit);
1780 } else {
1781 /* with the fix for bug 569, a higher node
1782 * number winning the mastery will respond
1783 * YES to mastery requests, but this node
1784 * had no way of knowing. let it pass. */
1785 mlog(0, "%u is the lowest node, "
1786 "%u is asserting. (%.*s) %u must "
1787 "have begun after %u won.\n", bit,
1788 assert->node_idx, namelen, name, bit,
1789 assert->node_idx);
1790 }
1791 }
1792 if (mle->type == DLM_MLE_MIGRATION) {
1793 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1794 mlog(0, "%s:%.*s: got cleanup assert"
1795 " from %u for migration\n",
1796 dlm->name, namelen, name,
1797 assert->node_idx);
1798 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1799 mlog(0, "%s:%.*s: got unrelated assert"
1800 " from %u for migration, ignoring\n",
1801 dlm->name, namelen, name,
1802 assert->node_idx);
1803 __dlm_put_mle(mle);
1804 spin_unlock(&dlm->master_lock);
1805 spin_unlock(&dlm->spinlock);
1806 goto done;
1807 }
1808 }
1809 }
1810 spin_unlock(&dlm->master_lock);
1811
1812 /* ok everything checks out with the MLE
1813 * now check to see if there is a lockres */
1814 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1815 if (res) {
1816 spin_lock(&res->spinlock);
1817 if (res->state & DLM_LOCK_RES_RECOVERING) {
1818 mlog(ML_ERROR, "%u asserting but %.*s is "
1819 "RECOVERING!\n", assert->node_idx, namelen, name);
1820 goto kill;
1821 }
1822 if (!mle) {
1823 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1824 res->owner != assert->node_idx) {
1825 mlog(ML_ERROR, "DIE! Mastery assert from %u, "
1826 "but current owner is %u! (%.*s)\n",
1827 assert->node_idx, res->owner, namelen,
1828 name);
1829 __dlm_print_one_lock_resource(res);
1830 BUG();
1831 }
1832 } else if (mle->type != DLM_MLE_MIGRATION) {
1833 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1834 /* owner is just re-asserting */
1835 if (res->owner == assert->node_idx) {
1836 mlog(0, "owner %u re-asserting on "
1837 "lock %.*s\n", assert->node_idx,
1838 namelen, name);
1839 goto ok;
1840 }
1841 mlog(ML_ERROR, "got assert_master from "
1842 "node %u, but %u is the owner! "
1843 "(%.*s)\n", assert->node_idx,
1844 res->owner, namelen, name);
1845 goto kill;
1846 }
1847 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1848 mlog(ML_ERROR, "got assert from %u, but lock "
1849 "with no owner should be "
1850 "in-progress! (%.*s)\n",
1851 assert->node_idx,
1852 namelen, name);
1853 goto kill;
1854 }
1855 } else /* mle->type == DLM_MLE_MIGRATION */ {
1856 /* should only be getting an assert from new master */
1857 if (assert->node_idx != mle->new_master) {
1858 mlog(ML_ERROR, "got assert from %u, but "
1859 "new master is %u, and old master "
1860 "was %u (%.*s)\n",
1861 assert->node_idx, mle->new_master,
1862 mle->master, namelen, name);
1863 goto kill;
1864 }
1865
1866 }
1867 ok:
1868 spin_unlock(&res->spinlock);
1869 }
1870
1871 // mlog(0, "woo! got an assert_master from node %u!\n",
1872 // assert->node_idx);
1873 if (mle) {
1874 int extra_ref = 0;
1875 int nn = -1;
1876 int rr, err = 0;
1877
1878 spin_lock(&mle->spinlock);
1879 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1880 extra_ref = 1;
1881 else {
1882 /* MASTER mle: if any bits set in the response map
1883 * then the calling node needs to re-assert to clear
1884 * up nodes that this node contacted */
1885 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1886 nn+1)) < O2NM_MAX_NODES) {
1887 if (nn != dlm->node_num && nn != assert->node_idx)
1888 master_request = 1;
1889 }
1890 }
1891 mle->master = assert->node_idx;
1892 atomic_set(&mle->woken, 1);
1893 wake_up(&mle->wq);
1894 spin_unlock(&mle->spinlock);
1895
1896 if (res) {
1897 int wake = 0;
1898 spin_lock(&res->spinlock);
1899 if (mle->type == DLM_MLE_MIGRATION) {
1900 mlog(0, "finishing off migration of lockres %.*s, "
1901 "from %u to %u\n",
1902 res->lockname.len, res->lockname.name,
1903 dlm->node_num, mle->new_master);
1904 res->state &= ~DLM_LOCK_RES_MIGRATING;
1905 wake = 1;
1906 dlm_change_lockres_owner(dlm, res, mle->new_master);
1907 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1908 } else {
1909 dlm_change_lockres_owner(dlm, res, mle->master);
1910 }
1911 spin_unlock(&res->spinlock);
1912 have_lockres_ref = 1;
1913 if (wake)
1914 wake_up(&res->wq);
1915 }
1916
1917 /* master is known, detach if not already detached.
1918 * ensures that only one assert_master call will happen
1919 * on this mle. */
1920 spin_lock(&dlm->master_lock);
1921
1922 rr = atomic_read(&mle->mle_refs.refcount);
1923 if (mle->inuse > 0) {
1924 if (extra_ref && rr < 3)
1925 err = 1;
1926 else if (!extra_ref && rr < 2)
1927 err = 1;
1928 } else {
1929 if (extra_ref && rr < 2)
1930 err = 1;
1931 else if (!extra_ref && rr < 1)
1932 err = 1;
1933 }
1934 if (err) {
1935 mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1936 "that will mess up this node, refs=%d, extra=%d, "
1937 "inuse=%d\n", dlm->name, namelen, name,
1938 assert->node_idx, rr, extra_ref, mle->inuse);
1939 dlm_print_one_mle(mle);
1940 }
1941 __dlm_unlink_mle(dlm, mle);
1942 __dlm_mle_detach_hb_events(dlm, mle);
1943 __dlm_put_mle(mle);
1944 if (extra_ref) {
1945 /* the assert master message now balances the extra
1946 * ref given by the master / migration request message.
1947 * if this is the last put, it will be removed
1948 * from the list. */
1949 __dlm_put_mle(mle);
1950 }
1951 spin_unlock(&dlm->master_lock);
1952 } else if (res) {
1953 if (res->owner != assert->node_idx) {
1954 mlog(0, "assert_master from %u, but current "
1955 "owner is %u (%.*s), no mle\n", assert->node_idx,
1956 res->owner, namelen, name);
1957 }
1958 }
1959 spin_unlock(&dlm->spinlock);
1960
1961 done:
1962 ret = 0;
1963 if (res) {
1964 spin_lock(&res->spinlock);
1965 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1966 spin_unlock(&res->spinlock);
1967 *ret_data = (void *)res;
1968 }
1969 dlm_put(dlm);
1970 if (master_request) {
1971 mlog(0, "need to tell master to reassert\n");
1972 /* positive. negative would shoot down the node. */
1973 ret |= DLM_ASSERT_RESPONSE_REASSERT;
1974 if (!have_lockres_ref) {
1975 mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1976 "mle present here for %s:%.*s, but no lockres!\n",
1977 assert->node_idx, dlm->name, namelen, name);
1978 }
1979 }
1980 if (have_lockres_ref) {
1981 /* let the master know we have a reference to the lockres */
1982 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1983 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1984 dlm->name, namelen, name, assert->node_idx);
1985 }
1986 return ret;
1987
1988 kill:
1989 /* kill the caller! */
1990 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
1991 "and killing the other node now! This node is OK and can continue.\n");
1992 __dlm_print_one_lock_resource(res);
1993 spin_unlock(&res->spinlock);
1994 spin_unlock(&dlm->spinlock);
1995 *ret_data = (void *)res;
1996 dlm_put(dlm);
1997 return -EINVAL;
1998 }
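
/*
 * Illustrative sketch, not part of the original file: how a sender of
 * an assert_master message might decode the status the handler above
 * returns through o2net.  A negative status means the assert was
 * rejected (the handler dumps state and answers -EINVAL to shoot the
 * caller down); a non-negative status is a bitmask of the
 * DLM_ASSERT_RESPONSE_* flags, which may both be set.
 */
static inline void dlm_example_decode_assert_response(int status,
						      int *reassert,
						      int *mastery_ref)
{
	*reassert = 0;
	*mastery_ref = 0;
	if (status < 0)
		return;
	/* responder saw response_map bits for other nodes, so the
	 * master must assert again to clear them up */
	if (status & DLM_ASSERT_RESPONSE_REASSERT)
		*reassert = 1;
	/* responder holds a ref; master must set its refmap bit */
	if (status & DLM_ASSERT_RESPONSE_MASTERY_REF)
		*mastery_ref = 1;
}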
1999
2000 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2001 {
2002 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2003
2004 if (ret_data) {
2005 spin_lock(&res->spinlock);
2006 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2007 spin_unlock(&res->spinlock);
2008 wake_up(&res->wq);
2009 dlm_lockres_put(res);
2010 }
2011 return;
2012 }
2013
2014 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2015 struct dlm_lock_resource *res,
2016 int ignore_higher, u8 request_from, u32 flags)
2017 {
2018 struct dlm_work_item *item;
2019 item = kzalloc(sizeof(*item), GFP_NOFS);
2020 if (!item)
2021 return -ENOMEM;
2022
2023
2024 /* queue up work for dlm_assert_master_worker */
2025 dlm_grab(dlm); /* get an extra ref for the work item */
2026 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2027 item->u.am.lockres = res; /* already have a ref */
2028 /* can optionally ignore node numbers higher than this node */
2029 item->u.am.ignore_higher = ignore_higher;
2030 item->u.am.request_from = request_from;
2031 item->u.am.flags = flags;
2032
2033 if (ignore_higher)
2034 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2035 res->lockname.name);
2036
2037 spin_lock(&dlm->work_lock);
2038 list_add_tail(&item->list, &dlm->work_list);
2039 spin_unlock(&dlm->work_lock);
2040
2041 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2042 return 0;
2043 }
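
/*
 * Usage sketch (illustrative only, never called): a caller that
 * already holds a reference on @res hands that reference to the work
 * item.  dlm_dispatch_assert_master() only consumes the ref once the
 * item is queued, so on -ENOMEM the caller must drop it itself.  The
 * flags value here is borrowed from the requery path purely for the
 * example.
 */
static inline int dlm_example_queue_assert(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res)
{
	int ret;

	dlm_lockres_get(res);	/* ref owned by the work item from here */
	ret = dlm_dispatch_assert_master(dlm, res, 0, dlm->node_num,
					 DLM_ASSERT_MASTER_REQUERY);
	if (ret < 0)
		dlm_lockres_put(res);	/* never queued, drop our ref */
	return ret;
}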
2044
2045 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2046 {
2047 struct dlm_ctxt *dlm = data;
2048 int ret = 0;
2049 struct dlm_lock_resource *res;
2050 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2051 int ignore_higher;
2052 int bit;
2053 u8 request_from;
2054 u32 flags;
2055
2056 dlm = item->dlm;
2057 res = item->u.am.lockres;
2058 ignore_higher = item->u.am.ignore_higher;
2059 request_from = item->u.am.request_from;
2060 flags = item->u.am.flags;
2061
2062 spin_lock(&dlm->spinlock);
2063 memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2064 spin_unlock(&dlm->spinlock);
2065
2066 clear_bit(dlm->node_num, nodemap);
2067 if (ignore_higher) {
2068 /* if this is just to clear up mles for nodes below
2069 * this node, do not send the message to the original
2070 * caller or any node number higher than this */
2071 clear_bit(request_from, nodemap);
2072 bit = dlm->node_num;
2073 while (1) {
2074 bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2075 bit+1);
2076 if (bit >= O2NM_MAX_NODES)
2077 break;
2078 clear_bit(bit, nodemap);
2079 }
2080 }
2081
2082 /*
2083 * If we're migrating this lock to someone else, we are no
2084 * longer allowed to assert our own mastery. OTOH, we need to
2085 * prevent migration from starting while we're still asserting
2086 * our dominance. The reserved ast delays migration.
2087 */
2088 spin_lock(&res->spinlock);
2089 if (res->state & DLM_LOCK_RES_MIGRATING) {
2090 mlog(0, "Someone asked us to assert mastery, but we're "
2091 "in the middle of migration. Skipping assert, "
2092 "the new master will handle that.\n");
2093 spin_unlock(&res->spinlock);
2094 goto put;
2095 } else
2096 __dlm_lockres_reserve_ast(res);
2097 spin_unlock(&res->spinlock);
2098
2099 /* this call now finishes out the nodemap
2100 * even if one or more nodes die */
2101 mlog(0, "worker about to master %.*s here, this=%u\n",
2102 res->lockname.len, res->lockname.name, dlm->node_num);
2103 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2104 if (ret < 0) {
2105 /* no need to restart, we are done */
2106 if (!dlm_is_host_down(ret))
2107 mlog_errno(ret);
2108 }
2109
2110 /* Ok, we've asserted ourselves. Let's let migration start. */
2111 dlm_lockres_release_ast(dlm, res);
2112
2113 put:
2114 dlm_lockres_put(res);
2115
2116 mlog(0, "finished with dlm_assert_master_worker\n");
2117 }
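
/*
 * Sketch, not used by the worker above: with ignore_higher set, the
 * pruning loop leaves only live nodes numbered strictly below this
 * node, minus the original caller.  The same effect written as a
 * single bitmap sweep:
 */
static inline void dlm_example_prune_nodemap(unsigned long *nodemap,
					     u8 this_node, u8 request_from)
{
	int bit = this_node;

	clear_bit(request_from, nodemap);
	/* clear this node and every higher-numbered node */
	while ((bit = find_next_bit(nodemap, O2NM_MAX_NODES, bit)) <
	       O2NM_MAX_NODES)
		clear_bit(bit, nodemap);
}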
2118
2119 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2120 * We cannot wait for node recovery to complete to begin mastering this
2121 * lockres because this lockres is used to kick off recovery! ;-)
2122 * So, do a pre-check on all living nodes to see if any of those nodes
2123 * think that $RECOVERY is currently mastered by a dead node. If so,
2124 * we wait a short time to allow that node to get notified by its own
2125 * heartbeat stack, then check again. All $RECOVERY lock resources
2126 * mastered by dead nodes are purged when the heartbeat callback is
2127 * fired, so we can know for sure that it is safe to continue once
2128 * the node returns a live node or no node. */
2129 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2130 struct dlm_lock_resource *res)
2131 {
2132 struct dlm_node_iter iter;
2133 int nodenum;
2134 int ret = 0;
2135 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2136
2137 spin_lock(&dlm->spinlock);
2138 dlm_node_iter_init(dlm->domain_map, &iter);
2139 spin_unlock(&dlm->spinlock);
2140
2141 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2142 /* do not send to self */
2143 if (nodenum == dlm->node_num)
2144 continue;
2145 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2146 if (ret < 0) {
2147 mlog_errno(ret);
2148 if (!dlm_is_host_down(ret))
2149 BUG();
2150 /* host is down, so answer for that node would be
2151 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2152 ret = 0;
2153 }
2154
2155 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2156 /* check to see if this master is in the recovery map */
2157 spin_lock(&dlm->spinlock);
2158 if (test_bit(master, dlm->recovery_map)) {
2159 mlog(ML_NOTICE, "%s: node %u has not seen "
2160 "node %u go down yet, and thinks the "
2161 "dead node is mastering the recovery "
2162 "lock. must wait.\n", dlm->name,
2163 nodenum, master);
2164 ret = -EAGAIN;
2165 }
2166 spin_unlock(&dlm->spinlock);
2167 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2168 master);
2169 break;
2170 }
2171 }
2172 return ret;
2173 }
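
/*
 * Caller-side sketch (illustrative; the real caller sits in the lock
 * mastery path, and the 100ms backoff is an assumption, not a value
 * taken from this file): on -EAGAIN some live node still believes a
 * dead node masters $RECOVERY, so back off briefly and re-run the
 * pre-check until heartbeat has purged the stale owner.
 */
static inline int dlm_example_wait_reco_master(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res)
{
	int ret;

	while ((ret = dlm_pre_master_reco_lockres(dlm, res)) == -EAGAIN)
		msleep(100);
	return ret;
}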
2174
2175 /*
2176 * DLM_DEREF_LOCKRES_MSG
2177 */
2178
2179 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2180 {
2181 struct dlm_deref_lockres deref;
2182 int ret = 0, r;
2183 const char *lockname;
2184 unsigned int namelen;
2185
2186 lockname = res->lockname.name;
2187 namelen = res->lockname.len;
2188 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2189
2190 mlog(0, "%s:%.*s: sending deref to %d\n",
2191 dlm->name, namelen, lockname, res->owner);
2192 memset(&deref, 0, sizeof(deref));
2193 deref.node_idx = dlm->node_num;
2194 deref.namelen = namelen;
2195 memcpy(deref.name, lockname, namelen);
2196
2197 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2198 &deref, sizeof(deref), res->owner, &r);
2199 if (ret < 0)
2200 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
2201 "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
2202 res->owner);
2203 else if (r < 0) {
2204 /* BAD. other node says I did not have a ref. */
2205 mlog(ML_ERROR, "while dropping ref on %s:%.*s "
2206 "(master=%u) got %d.\n", dlm->name, namelen,
2207 lockname, res->owner, r);
2208 dlm_print_one_lock_resource(res);
2209 BUG();
2210 }
2211 return ret;
2212 }
2213
2214 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2215 void **ret_data)
2216 {
2217 struct dlm_ctxt *dlm = data;
2218 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2219 struct dlm_lock_resource *res = NULL;
2220 char *name;
2221 unsigned int namelen;
2222 int ret = -EINVAL;
2223 u8 node;
2224 unsigned int hash;
2225 struct dlm_work_item *item;
2226 int cleared = 0;
2227 int dispatch = 0;
2228
2229 if (!dlm_grab(dlm))
2230 return 0;
2231
2232 name = deref->name;
2233 namelen = deref->namelen;
2234 node = deref->node_idx;
2235
2236 if (namelen > DLM_LOCKID_NAME_MAX) {
2237 mlog(ML_ERROR, "Invalid name length!\n");
2238 goto done;
2239 }
2240 if (deref->node_idx >= O2NM_MAX_NODES) {
2241 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2242 goto done;
2243 }
2244
2245 hash = dlm_lockid_hash(name, namelen);
2246
2247 spin_lock(&dlm->spinlock);
2248 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2249 if (!res) {
2250 spin_unlock(&dlm->spinlock);
2251 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2252 dlm->name, namelen, name);
2253 goto done;
2254 }
2255 spin_unlock(&dlm->spinlock);
2256
2257 spin_lock(&res->spinlock);
2258 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2259 dispatch = 1;
2260 else {
2261 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2262 if (test_bit(node, res->refmap)) {
2263 dlm_lockres_clear_refmap_bit(node, res);
2264 cleared = 1;
2265 }
2266 }
2267 spin_unlock(&res->spinlock);
2268
2269 if (!dispatch) {
2270 if (cleared)
2271 dlm_lockres_calc_usage(dlm, res);
2272 else {
2273 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2274 "but it is already dropped!\n", dlm->name,
2275 res->lockname.len, res->lockname.name, node);
2276 dlm_print_one_lock_resource(res);
2277 }
2278 ret = 0;
2279 goto done;
2280 }
2281
2282 item = kzalloc(sizeof(*item), GFP_NOFS);
2283 if (!item) {
2284 ret = -ENOMEM;
2285 mlog_errno(ret);
2286 goto done;
2287 }
2288
2289 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2290 item->u.dl.deref_res = res;
2291 item->u.dl.deref_node = node;
2292
2293 spin_lock(&dlm->work_lock);
2294 list_add_tail(&item->list, &dlm->work_list);
2295 spin_unlock(&dlm->work_lock);
2296
2297 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2298 return 0;
2299
2300 done:
2301 if (res)
2302 dlm_lockres_put(res);
2303 dlm_put(dlm);
2304
2305 return ret;
2306 }
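
/*
 * The handler above defers the deref to a worker whenever
 * DLM_LOCK_RES_SETREF_INPROG is set, because this node may still be
 * in the middle of recording a mastery reference for the same lockres
 * (the flag is cleared in dlm_assert_master_post_handler()).  The
 * decision, factored out as a sketch for clarity:
 */
static inline int dlm_example_deref_must_dispatch(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	return !!(res->state & DLM_LOCK_RES_SETREF_INPROG);
}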
2307
2308 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2309 {
2310 struct dlm_ctxt *dlm;
2311 struct dlm_lock_resource *res;
2312 u8 node;
2313 u8 cleared = 0;
2314
2315 dlm = item->dlm;
2316 res = item->u.dl.deref_res;
2317 node = item->u.dl.deref_node;
2318
2319 spin_lock(&res->spinlock);
2320 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2321 if (test_bit(node, res->refmap)) {
2322 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2323 dlm_lockres_clear_refmap_bit(node, res);
2324 cleared = 1;
2325 }
2326 spin_unlock(&res->spinlock);
2327
2328 if (cleared) {
2329 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2330 dlm->name, res->lockname.len, res->lockname.name, node);
2331 dlm_lockres_calc_usage(dlm, res);
2332 } else {
2333 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2334 "but it is already dropped!\n", dlm->name,
2335 res->lockname.len, res->lockname.name, node);
2336 dlm_print_one_lock_resource(res);
2337 }
2338
2339 dlm_lockres_put(res);
2340 }
2341
2342 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2343 * if not. If 0, numlocks is set to the number of locks and hasrefs
2344 * to whether any other node holds a refmap reference. */
2345 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2346 struct dlm_lock_resource *res,
2347 int *numlocks,
2348 int *hasrefs)
2349 {
2350 int ret;
2351 int i;
2352 int count = 0;
2353 struct list_head *queue;
2354 struct dlm_lock *lock;
2355
2356 assert_spin_locked(&res->spinlock);
2357
2358 *numlocks = 0;
2359 *hasrefs = 0;
2360
2361 ret = -EINVAL;
2362 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2363 mlog(0, "cannot migrate lockres with unknown owner!\n");
2364 goto leave;
2365 }
2366
2367 if (res->owner != dlm->node_num) {
2368 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2369 goto leave;
2370 }
2371
2372 ret = 0;
2373 queue = &res->granted;
2374 for (i = 0; i < 3; i++) {
2375 list_for_each_entry(lock, queue, list) {
2376 ++count;
2377 if (lock->ml.node == dlm->node_num) {
2378 mlog(0, "found a lock owned by this node still "
2379 "on the %s queue! will not migrate this "
2380 "lockres\n", (i == 0 ? "granted" :
2381 (i == 1 ? "converting" :
2382 "blocked")));
2383 ret = -ENOTEMPTY;
2384 goto leave;
2385 }
2386 }
2387 queue++;
2388 }
2389
2390 *numlocks = count;
2391
2392 count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2393 if (count < O2NM_MAX_NODES)
2394 *hasrefs = 1;
2395
2396 mlog(0, "%s: res %.*s, Migratable, locks %d, refs %d\n", dlm->name,
2397 res->lockname.len, res->lockname.name, *numlocks, *hasrefs);
2398
2399 leave:
2400 return ret;
2401 }
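
/*
 * Note on the walk above: "queue++" relies on granted, converting and
 * blocked being three consecutive struct list_heads inside
 * struct dlm_lock_resource.  An equivalent sketch that names the
 * queues explicitly (illustrative only):
 */
static inline int dlm_example_count_locks(struct dlm_lock_resource *res)
{
	struct list_head *queues[] = { &res->granted, &res->converting,
				       &res->blocked };
	struct dlm_lock *lock;
	int i, count = 0;

	assert_spin_locked(&res->spinlock);
	for (i = 0; i < 3; i++)
		list_for_each_entry(lock, queues[i], list)
			count++;
	return count;
}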
2402
2403 /*
2404 * DLM_MIGRATE_LOCKRES
2405 */
2406
2407
2408 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2409 struct dlm_lock_resource *res,
2410 u8 target)
2411 {
2412 struct dlm_master_list_entry *mle = NULL;
2413 struct dlm_master_list_entry *oldmle = NULL;
2414 struct dlm_migratable_lockres *mres = NULL;
2415 int ret = 0;
2416 const char *name;
2417 unsigned int namelen;
2418 int mle_added = 0;
2419 int numlocks, hasrefs;
2420 int wake = 0;
2421
2422 if (!dlm_grab(dlm))
2423 return -EINVAL;
2424
2425 name = res->lockname.name;
2426 namelen = res->lockname.len;
2427
2428 mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);
2429
2430 /*
2431 * ensure this lockres is a proper candidate for migration
2432 */
2433 spin_lock(&res->spinlock);
2434 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
2435 if (ret < 0) {
2436 spin_unlock(&res->spinlock);
2437 goto leave;
2438 }
2439 spin_unlock(&res->spinlock);
2440
2441 /* no work to do */
2442 if (numlocks == 0 && !hasrefs)
2443 goto leave;
2444
2445 /*
2446 * preallocate up front
2447 * if this fails, abort
2448 */
2449
2450 ret = -ENOMEM;
2451 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2452 if (!mres) {
2453 mlog_errno(ret);
2454 goto leave;
2455 }
2456
2457 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2458 if (!mle) {
2459 mlog_errno(ret);
2460 goto leave;
2461 }
2462 ret = 0;
2463
2464 /*
2465 * find a node to migrate the lockres to
2466 */
2467
2468 spin_lock(&dlm->spinlock);
2469 /* pick a new node */
2470 if (!test_bit(target, dlm->domain_map) ||
2471 target >= O2NM_MAX_NODES) {
2472 target = dlm_pick_migration_target(dlm, res);
2473 }
2474 mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name,
2475 namelen, name, target);
2476
2477 if (target >= O2NM_MAX_NODES ||
2478 !test_bit(target, dlm->domain_map)) {
2479 /* target chosen is not alive */
2480 ret = -EINVAL;
2481 }
2482
2483 if (ret) {
2484 spin_unlock(&dlm->spinlock);
2485 goto fail;
2486 }
2487
2488 mlog(0, "continuing with target = %u\n", target);
2489
2490 /*
2491 * clear any existing master requests and
2492 * add the migration mle to the list
2493 */
2494 spin_lock(&dlm->master_lock);
2495 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2496 namelen, target, dlm->node_num);
2497 spin_unlock(&dlm->master_lock);
2498 spin_unlock(&dlm->spinlock);
2499
2500 if (ret == -EEXIST) {
2501 mlog(0, "another process is already migrating it\n");
2502 goto fail;
2503 }
2504 mle_added = 1;
2505
2506 /*
2507 * set the MIGRATING flag and flush asts
2508 * if we fail after this we need to re-dirty the lockres
2509 */
2510 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2511 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2512 "the target went down.\n", res->lockname.len,
2513 res->lockname.name, target);
2514 spin_lock(&res->spinlock);
2515 res->state &= ~DLM_LOCK_RES_MIGRATING;
2516 wake = 1;
2517 spin_unlock(&res->spinlock);
2518 ret = -EINVAL;
2519 }
2520
2521 fail:
2522 if (oldmle) {
2523 /* master is known, detach if not already detached */
2524 dlm_mle_detach_hb_events(dlm, oldmle);
2525 dlm_put_mle(oldmle);
2526 }
2527
2528 if (ret < 0) {
2529 if (mle_added) {
2530 dlm_mle_detach_hb_events(dlm, mle);
2531 dlm_put_mle(mle);
2532 } else if (mle) {
2533 kmem_cache_free(dlm_mle_cache, mle);
2534 }
2535 goto leave;
2536 }
2537
2538 /*
2539 * at this point, we have a migration target, an mle
2540 * in the master list, and the MIGRATING flag set on
2541 * the lockres
2542 */
2543
2544 /* now that remote nodes are spinning on the MIGRATING flag,
2545 * ensure that all assert_master work is flushed. */
2546 flush_workqueue(dlm->dlm_worker);
2547
2548 /* get an extra reference on the mle.
2549 * otherwise the assert_master from the new
2550 * master will destroy this.
2551 * also, make sure that all callers of dlm_get_mle
2552 * take both dlm->spinlock and dlm->master_lock */
2553 spin_lock(&dlm->spinlock);
2554 spin_lock(&dlm->master_lock);
2555 dlm_get_mle_inuse(mle);
2556 spin_unlock(&dlm->master_lock);
2557 spin_unlock(&dlm->spinlock);
2558
2559 /* notify new node and send all lock state */
2560 /* call send_one_lockres with migration flag.
2561 * this serves as notice to the target node that a
2562 * migration is starting. */
2563 ret = dlm_send_one_lockres(dlm, res, mres, target,
2564 DLM_MRES_MIGRATION);
2565
2566 if (ret < 0) {
2567 mlog(0, "migration to node %u failed with %d\n",
2568 target, ret);
2569 /* migration failed, detach and clean up mle */
2570 dlm_mle_detach_hb_events(dlm, mle);
2571 dlm_put_mle(mle);
2572 dlm_put_mle_inuse(mle);
2573 spin_lock(&res->spinlock);
2574 res->state &= ~DLM_LOCK_RES_MIGRATING;
2575 wake = 1;
2576 spin_unlock(&res->spinlock);
2577 goto leave;
2578 }
2579
2580 /* at this point, the target sends a message to all nodes,
2581 * (using dlm_do_migrate_request). this node is skipped since
2582 * we had to put an mle in the list to begin the process. this
2583 * node now waits for target to do an assert master. this node
2584 * will be the last one notified, ensuring that the migration
2585 * is complete everywhere. if the target dies while this is
2586 * going on, some nodes could potentially see the target as the
2587 * master, so it is important that my recovery finds the migration
2588 * mle and sets the master to UNKNOWN. */
2589
2590
2591 /* wait for new node to assert master */
2592 while (1) {
2593 ret = wait_event_interruptible_timeout(mle->wq,
2594 (atomic_read(&mle->woken) == 1),
2595 msecs_to_jiffies(5000));
2596
2597 if (ret >= 0) {
2598 if (atomic_read(&mle->woken) == 1 ||
2599 res->owner == target)
2600 break;
2601
2602 mlog(0, "%s:%.*s: timed out during migration\n",
2603 dlm->name, res->lockname.len, res->lockname.name);
2604 /* avoid hang during shutdown when migrating lockres
2605 * to a node which also goes down */
2606 if (dlm_is_node_dead(dlm, target)) {
2607 mlog(0, "%s:%.*s: expected migration "
2608 "target %u is no longer up, restarting\n",
2609 dlm->name, res->lockname.len,
2610 res->lockname.name, target);
2611 ret = -EINVAL;
2612 /* migration failed, detach and clean up mle */
2613 dlm_mle_detach_hb_events(dlm, mle);
2614 dlm_put_mle(mle);
2615 dlm_put_mle_inuse(mle);
2616 spin_lock(&res->spinlock);
2617 res->state &= ~DLM_LOCK_RES_MIGRATING;
2618 wake = 1;
2619 spin_unlock(&res->spinlock);
2620 goto leave;
2621 }
2622 } else
2623 mlog(0, "%s:%.*s: caught signal during migration\n",
2624 dlm->name, res->lockname.len, res->lockname.name);
2625 }
2626
2627 /* all done, set the owner, clear the flag */
2628 spin_lock(&res->spinlock);
2629 dlm_set_lockres_owner(dlm, res, target);
2630 res->state &= ~DLM_LOCK_RES_MIGRATING;
2631 dlm_remove_nonlocal_locks(dlm, res);
2632 spin_unlock(&res->spinlock);
2633 wake_up(&res->wq);
2634
2635 /* master is known, detach if not already detached */
2636 dlm_mle_detach_hb_events(dlm, mle);
2637 dlm_put_mle_inuse(mle);
2638 ret = 0;
2639
2640 dlm_lockres_calc_usage(dlm, res);
2641
2642 leave:
2643 /* re-dirty the lockres if we failed */
2644 if (ret < 0)
2645 dlm_kick_thread(dlm, res);
2646
2647 /* wake up waiters if the MIGRATING flag got set
2648 * but migration failed */
2649 if (wake)
2650 wake_up(&res->wq);
2651
2652 /* TODO: cleanup */
2653 if (mres)
2654 free_page((unsigned long)mres);
2655
2656 dlm_put(dlm);
2657
2658 mlog(0, "returning %d\n", ret);
2659 return ret;
2660 }
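
/*
 * Summary of the happy path above, for orientation: (1) verify the
 * lockres is a migration candidate, (2) preallocate the mres page and
 * the mle, (3) pick a live target and install a MIGRATION mle, (4)
 * set DLM_LOCK_RES_MIGRATING once all asts are flushed, (5) send the
 * lock state to the target, (6) wait for the target's assert_master,
 * (7) set the new owner and drop the nonlocal locks.  Any failure
 * after step 4 clears MIGRATING and wakes waiters via "wake".
 */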
2661
2662 #define DLM_MIGRATION_RETRY_MS 100
2663
2664 /* Should be called only after beginning the domain leave process.
2665 * There should not be any remaining locks on nonlocal lock resources,
2666 * and there should be no local locks left on locally mastered resources.
2667 *
2668 * Called with the dlm spinlock held, may drop it to do migration, but
2669 * will re-acquire before exit.
2670 *
2671 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2672 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2673 {
2674 int ret;
2675 int lock_dropped = 0;
2676 int numlocks, hasrefs;
2677
2678 spin_lock(&res->spinlock);
2679 if (res->owner != dlm->node_num) {
2680 if (!__dlm_lockres_unused(res)) {
2681 mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2682 "trying to free this but locks remain\n",
2683 dlm->name, res->lockname.len, res->lockname.name);
2684 }
2685 spin_unlock(&res->spinlock);
2686 goto leave;
2687 }
2688
2689 /* No need to migrate a lockres having no locks and no refs */
2690 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
2691 if (ret >= 0 && numlocks == 0 && !hasrefs) {
2692 spin_unlock(&res->spinlock);
2693 goto leave;
2694 }
2695 spin_unlock(&res->spinlock);
2696
2697 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2698 spin_unlock(&dlm->spinlock);
2699 lock_dropped = 1;
2700 while (1) {
2701 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2702 if (ret >= 0)
2703 break;
2704 if (ret == -ENOTEMPTY) {
2705 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2706 res->lockname.len, res->lockname.name);
2707 BUG();
2708 }
2709
2710 mlog(0, "lockres %.*s: migrate failed, "
2711 "retrying\n", res->lockname.len,
2712 res->lockname.name);
2713 msleep(DLM_MIGRATION_RETRY_MS);
2714 }
2715 spin_lock(&dlm->spinlock);
2716 leave:
2717 return lock_dropped;
2718 }
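
/*
 * Caller-side sketch: because dlm_empty_lockres() may drop and retake
 * dlm->spinlock (return value 1), a caller walking the lockres hash
 * must treat its iterator as invalid whenever the lock was dropped
 * and restart the scan, roughly:
 *
 *	spin_lock(&dlm->spinlock);
 * restart:
 *	for each res in hash bucket {
 *		if (dlm_empty_lockres(dlm, res))
 *			goto restart;	// iterator is stale
 *	}
 *	spin_unlock(&dlm->spinlock);
 */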
2719
2720 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2721 {
2722 int ret;
2723 spin_lock(&dlm->ast_lock);
2724 spin_lock(&lock->spinlock);
2725 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2726 spin_unlock(&lock->spinlock);
2727 spin_unlock(&dlm->ast_lock);
2728 return ret;
2729 }
2730
2731 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2732 struct dlm_lock_resource *res,
2733 u8 mig_target)
2734 {
2735 int can_proceed;
2736 spin_lock(&res->spinlock);
2737 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2738 spin_unlock(&res->spinlock);
2739
2740 /* target has died, so make the caller break out of the
2741 * wait_event, but caller must recheck the domain_map */
2742 spin_lock(&dlm->spinlock);
2743 if (!test_bit(mig_target, dlm->domain_map))
2744 can_proceed = 1;
2745 spin_unlock(&dlm->spinlock);
2746 return can_proceed;
2747 }
2748
2749 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2750 struct dlm_lock_resource *res)
2751 {
2752 int ret;
2753 spin_lock(&res->spinlock);
2754 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2755 spin_unlock(&res->spinlock);
2756 return ret;
2757 }
2758
2759
2760 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2761 struct dlm_lock_resource *res,
2762 u8 target)
2763 {
2764 int ret = 0;
2765
2766 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2767 res->lockname.len, res->lockname.name, dlm->node_num,
2768 target);
2769 /* need to set MIGRATING flag on lockres. this is done by
2770 * ensuring that all asts have been flushed for this lockres. */
2771 spin_lock(&res->spinlock);
2772 BUG_ON(res->migration_pending);
2773 res->migration_pending = 1;
2774 /* strategy is to reserve an extra ast then release
2775 * it below, letting the release do all of the work */
2776 __dlm_lockres_reserve_ast(res);
2777 spin_unlock(&res->spinlock);
2778
2779 /* now flush all the pending asts */
2780 dlm_kick_thread(dlm, res);
2781 /* before waiting on DIRTY, block processes which may
2782 * try to dirty the lockres before MIGRATING is set */
2783 spin_lock(&res->spinlock);
2784 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2785 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2786 spin_unlock(&res->spinlock);
2787 /* now wait on any pending asts and the DIRTY state */
2788 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2789 dlm_lockres_release_ast(dlm, res);
2790
2791 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2792 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2793 /* if the extra ref we just put was the final one, this
2794 * will pass thru immediately. otherwise, we need to wait
2795 * for the last ast to finish. */
2796 again:
2797 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2798 dlm_migration_can_proceed(dlm, res, target),
2799 msecs_to_jiffies(1000));
2800 if (ret < 0) {
2801 mlog(0, "woken again: migrating? %s, dead? %s\n",
2802 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2803 test_bit(target, dlm->domain_map) ? "no":"yes");
2804 } else {
2805 mlog(0, "all is well: migrating? %s, dead? %s\n",
2806 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2807 test_bit(target, dlm->domain_map) ? "no":"yes");
2808 }
2809 if (!dlm_migration_can_proceed(dlm, res, target)) {
2810 mlog(0, "trying again...\n");
2811 goto again;
2812 }
2813
2814 ret = 0;
2815 /* did the target go down or die? */
2816 spin_lock(&dlm->spinlock);
2817 if (!test_bit(target, dlm->domain_map)) {
2818 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2819 target);
2820 ret = -EHOSTDOWN;
2821 }
2822 spin_unlock(&dlm->spinlock);
2823
2824 /*
2825 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
2826 * another try; otherwise, we are sure the MIGRATING state is there,
2827 * drop the unneeded state which blocked threads trying to DIRTY
2828 */
2829 spin_lock(&res->spinlock);
2830 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2831 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2832 if (!ret)
2833 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2834 spin_unlock(&res->spinlock);
2835
2836 /*
2837 * at this point:
2838 *
2839 * o the DLM_LOCK_RES_MIGRATING flag is set if target not down
2840 * o there are no pending asts on this lockres
2841 * o all processes trying to reserve an ast on this
2842 * lockres must wait for the MIGRATING flag to clear
2843 */
2844 return ret;
2845 }
2846
2847 /* last step in the migration process.
2848 * original master calls this to free all of the dlm_lock
2849 * structures that used to be for other nodes. */
2850 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2851 struct dlm_lock_resource *res)
2852 {
2853 struct list_head *queue = &res->granted;
2854 int i, bit;
2855 struct dlm_lock *lock, *next;
2856
2857 assert_spin_locked(&res->spinlock);
2858
2859 BUG_ON(res->owner == dlm->node_num);
2860
2861 for (i=0; i<3; i++) {
2862 list_for_each_entry_safe(lock, next, queue, list) {
2863 if (lock->ml.node != dlm->node_num) {
2864 mlog(0, "putting lock for node %u\n",
2865 lock->ml.node);
2866 /* be extra careful */
2867 BUG_ON(!list_empty(&lock->ast_list));
2868 BUG_ON(!list_empty(&lock->bast_list));
2869 BUG_ON(lock->ast_pending);
2870 BUG_ON(lock->bast_pending);
2871 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2872 list_del_init(&lock->list);
2873 dlm_lock_put(lock);
2874 /* In a normal unlock, we would have added a
2875 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2876 dlm_lock_put(lock);
2877 }
2878 }
2879 queue++;
2880 }
2881 bit = 0;
2882 while (1) {
2883 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2884 if (bit >= O2NM_MAX_NODES)
2885 break;
2886 /* do not clear the local node reference, if there is a
2887 * process holding this, let it drop the ref itself */
2888 if (bit != dlm->node_num) {
2889 mlog(0, "%s:%.*s: node %u had a ref to this "
2890 "migrating lockres, clearing\n", dlm->name,
2891 res->lockname.len, res->lockname.name, bit);
2892 dlm_lockres_clear_refmap_bit(bit, res);
2893 }
2894 bit++;
2895 }
2896 }
2897
2898 /* for now this is not too intelligent. we will
2899 * need stats to make this do the right thing.
2900 * this just finds the first lock on one of the
2901 * queues and uses that node as the target. */
2902 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2903 struct dlm_lock_resource *res)
2904 {
2905 int i;
2906 struct list_head *queue = &res->granted;
2907 struct dlm_lock *lock;
2908 int nodenum;
2909
2910 assert_spin_locked(&dlm->spinlock);
2911
2912 spin_lock(&res->spinlock);
2913 for (i=0; i<3; i++) {
2914 list_for_each_entry(lock, queue, list) {
2915 /* up to the caller to make sure this node
2916 * is alive */
2917 if (lock->ml.node != dlm->node_num) {
2918 spin_unlock(&res->spinlock);
2919 return lock->ml.node;
2920 }
2921 }
2922 queue++;
2923 }
2924
2925 nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2926 if (nodenum < O2NM_MAX_NODES) {
2927 spin_unlock(&res->spinlock);
2928 return nodenum;
2929 }
2930 spin_unlock(&res->spinlock);
2931 mlog(0, "have not found a suitable target yet! checking domain map\n");
2932
2933 /* ok now we're getting desperate. pick anyone alive. */
2934 nodenum = -1;
2935 while (1) {
2936 nodenum = find_next_bit(dlm->domain_map,
2937 O2NM_MAX_NODES, nodenum+1);
2938 mlog(0, "found %d in domain map\n", nodenum);
2939 if (nodenum >= O2NM_MAX_NODES)
2940 break;
2941 if (nodenum != dlm->node_num) {
2942 mlog(0, "picking %d\n", nodenum);
2943 return nodenum;
2944 }
2945 }
2946
2947 mlog(0, "giving up. no master to migrate to\n");
2948 return DLM_LOCK_RES_OWNER_UNKNOWN;
2949 }
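
/*
 * Selection order above, summarized: (1) any remote node already
 * holding a lock on one of the three queues, (2) any remote node with
 * a bit set in the refmap, (3) any other live node in the domain map,
 * and finally (4) give up and return DLM_LOCK_RES_OWNER_UNKNOWN so
 * the caller can abort the migration.
 */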
2950
2951
2952
2953 /* this is called by the new master once all lockres
2954 * data has been received */
2955 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2956 struct dlm_lock_resource *res,
2957 u8 master, u8 new_master,
2958 struct dlm_node_iter *iter)
2959 {
2960 struct dlm_migrate_request migrate;
2961 int ret, skip, status = 0;
2962 int nodenum;
2963
2964 memset(&migrate, 0, sizeof(migrate));
2965 migrate.namelen = res->lockname.len;
2966 memcpy(migrate.name, res->lockname.name, migrate.namelen);
2967 migrate.new_master = new_master;
2968 migrate.master = master;
2969
2970 ret = 0;
2971
2972 /* send message to all nodes, except the master and myself */
2973 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2974 if (nodenum == master ||
2975 nodenum == new_master)
2976 continue;
2977
2978 /* We could race exit domain. If exited, skip. */
2979 spin_lock(&dlm->spinlock);
2980 skip = (!test_bit(nodenum, dlm->domain_map));
2981 spin_unlock(&dlm->spinlock);
2982 if (skip) {
2983 clear_bit(nodenum, iter->node_map);
2984 continue;
2985 }
2986
2987 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2988 &migrate, sizeof(migrate), nodenum,
2989 &status);
2990 if (ret < 0) {
2991 mlog(ML_ERROR, "Error %d when sending message %u (key "
2992 "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
2993 dlm->key, nodenum);
2994 if (!dlm_is_host_down(ret)) {
2995 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2996 BUG();
2997 }
2998 clear_bit(nodenum, iter->node_map);
2999 ret = 0;
3000 } else if (status < 0) {
3001 mlog(0, "migrate request (node %u) returned %d!\n",
3002 nodenum, status);
3003 ret = status;
3004 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3005 /* during the migration request we short-circuited
3006 * the mastery of the lockres. make sure we have
3007 * a mastery ref for nodenum */
3008 mlog(0, "%s:%.*s: need ref for node %u\n",
3009 dlm->name, res->lockname.len, res->lockname.name,
3010 nodenum);
3011 spin_lock(&res->spinlock);
3012 dlm_lockres_set_refmap_bit(nodenum, res);
3013 spin_unlock(&res->spinlock);
3014 }
3015 }
3016
3017 if (ret < 0)
3018 mlog_errno(ret);
3019
3020 mlog(0, "returning ret=%d\n", ret);
3021 return ret;
3022 }
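
/*
 * Sketch of the dlm_node_iter pattern used above (illustrative, never
 * called): the domain map is snapshotted under dlm->spinlock and then
 * walked unlocked, which is why every sender must tolerate nodes that
 * died or left the domain mid-walk.
 */
static inline void dlm_example_walk_live_nodes(struct dlm_ctxt *dlm)
{
	struct dlm_node_iter iter;
	int node;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((node = dlm_node_iter_next(&iter)) >= 0) {
		if (node == dlm->node_num)
			continue;	/* skip self */
		/* ... send a message to node here ... */
	}
}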
3023
3024
3025 /* if there is an existing mle for this lockres, we now know who the master is.
3026 * (the one who sent us *this* message) we can clear it up right away.
3027 * since the process that put the mle on the list still has a reference to it,
3028 * we can unhash it now, set the master and wake the process. as a result,
3029 * we will have no mle in the list to start with. now we can add an mle for
3030 * the migration and this should be the only one found for those scanning the
3031 * list. */
3032 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3033 void **ret_data)
3034 {
3035 struct dlm_ctxt *dlm = data;
3036 struct dlm_lock_resource *res = NULL;
3037 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3038 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3039 const char *name;
3040 unsigned int namelen, hash;
3041 int ret = 0;
3042
3043 if (!dlm_grab(dlm))
3044 return -EINVAL;
3045
3046 name = migrate->name;
3047 namelen = migrate->namelen;
3048 hash = dlm_lockid_hash(name, namelen);
3049
3050 /* preallocate.. if this fails, abort */
3051 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3052
3053 if (!mle) {
3054 ret = -ENOMEM;
3055 goto leave;
3056 }
3057
3058 /* check for pre-existing lock */
3059 spin_lock(&dlm->spinlock);
3060 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3061 if (res) {
3062 spin_lock(&res->spinlock);
3063 if (res->state & DLM_LOCK_RES_RECOVERING) {
3064 /* if all is working ok, this can only mean that we got
3065 * a migrate request from a node that we now see as
3066 * dead. what can we do here? drop it to the floor? */
3067 spin_unlock(&res->spinlock);
3068 mlog(ML_ERROR, "Got a migrate request, but the "
3069 "lockres is marked as recovering!\n");
3070 kmem_cache_free(dlm_mle_cache, mle);
3071 ret = -EINVAL; /* need a better solution */
3072 goto unlock;
3073 }
3074 res->state |= DLM_LOCK_RES_MIGRATING;
3075 spin_unlock(&res->spinlock);
3076 }
3077
3078 spin_lock(&dlm->master_lock);
3079 /* ignore status. only nonzero status would BUG. */
3080 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3081 name, namelen,
3082 migrate->new_master,
3083 migrate->master);
3084
3085 spin_unlock(&dlm->master_lock);
3086 unlock:
3087 spin_unlock(&dlm->spinlock);
3088
3089 if (oldmle) {
3090 /* master is known, detach if not already detached */
3091 dlm_mle_detach_hb_events(dlm, oldmle);
3092 dlm_put_mle(oldmle);
3093 }
3094
3095 if (res)
3096 dlm_lockres_put(res);
3097 leave:
3098 dlm_put(dlm);
3099 return ret;
3100 }
3101
3102 /* must be holding dlm->spinlock and dlm->master_lock
3103 * when adding a migration mle, we can clear any other mles
3104 * in the master list because we know with certainty that
3105 * the master is "master". so we remove any old mle from
3106 * the list after setting its master field, and then add
3107 * the new migration mle. this way we can hold with the rule
3108 * of having only one mle for a given lock name at all times. */
3109 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3110 struct dlm_lock_resource *res,
3111 struct dlm_master_list_entry *mle,
3112 struct dlm_master_list_entry **oldmle,
3113 const char *name, unsigned int namelen,
3114 u8 new_master, u8 master)
3115 {
3116 int found;
3117 int ret = 0;
3118
3119 *oldmle = NULL;
3120
3121 assert_spin_locked(&dlm->spinlock);
3122 assert_spin_locked(&dlm->master_lock);
3123
3124 /* caller is responsible for any ref taken here on oldmle */
3125 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3126 if (found) {
3127 struct dlm_master_list_entry *tmp = *oldmle;
3128 spin_lock(&tmp->spinlock);
3129 if (tmp->type == DLM_MLE_MIGRATION) {
3130 if (master == dlm->node_num) {
3131 /* ah another process raced me to it */
3132 mlog(0, "tried to migrate %.*s, but some "
3133 "process beat me to it\n",
3134 namelen, name);
3135 ret = -EEXIST;
3136 } else {
3137 /* bad. 2 NODES are trying to migrate! */
3138 mlog(ML_ERROR, "migration error mle: "
3139 "master=%u new_master=%u // request: "
3140 "master=%u new_master=%u // "
3141 "lockres=%.*s\n",
3142 tmp->master, tmp->new_master,
3143 master, new_master,
3144 namelen, name);
3145 BUG();
3146 }
3147 } else {
3148 /* this is essentially what assert_master does */
3149 tmp->master = master;
3150 atomic_set(&tmp->woken, 1);
3151 wake_up(&tmp->wq);
3152 /* remove it so that only one mle will be found */
3153 __dlm_unlink_mle(dlm, tmp);
3154 __dlm_mle_detach_hb_events(dlm, tmp);
3155 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3156 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3157 "telling master to get ref for cleared out mle "
3158 "during migration\n", dlm->name, namelen, name,
3159 master, new_master);
3160 }
3161 spin_unlock(&tmp->spinlock);
3162 }
3163
3164 /* now add a migration mle to the tail of the list */
3165 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3166 mle->new_master = new_master;
3167 /* the new master will be sending an assert master for this.
3168 * at that point we will get the refmap reference */
3169 mle->master = master;
3170 /* do this for consistency with other mle types */
3171 set_bit(new_master, mle->maybe_map);
3172 __dlm_insert_mle(dlm, mle);
3173
3174 return ret;
3175 }
3176
3177 /*
3178 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3179 */
3180 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3181 struct dlm_master_list_entry *mle)
3182 {
3183 struct dlm_lock_resource *res;
3184
3185 /* Find the lockres associated to the mle and set its owner to UNK */
3186 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3187 mle->mnamehash);
3188 if (res) {
3189 spin_unlock(&dlm->master_lock);
3190
3191 /* move lockres onto recovery list */
3192 spin_lock(&res->spinlock);
3193 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3194 dlm_move_lockres_to_recovery_list(dlm, res);
3195 spin_unlock(&res->spinlock);
3196 dlm_lockres_put(res);
3197
3198 /* about to get rid of mle, detach from heartbeat */
3199 __dlm_mle_detach_hb_events(dlm, mle);
3200
3201 /* dump the mle */
3202 spin_lock(&dlm->master_lock);
3203 __dlm_put_mle(mle);
3204 spin_unlock(&dlm->master_lock);
3205 }
3206
3207 return res;
3208 }
3209
3210 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3211 struct dlm_master_list_entry *mle)
3212 {
3213 __dlm_mle_detach_hb_events(dlm, mle);
3214
3215 spin_lock(&mle->spinlock);
3216 __dlm_unlink_mle(dlm, mle);
3217 atomic_set(&mle->woken, 1);
3218 spin_unlock(&mle->spinlock);
3219
3220 wake_up(&mle->wq);
3221 }
3222
3223 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3224 struct dlm_master_list_entry *mle, u8 dead_node)
3225 {
3226 int bit;
3227
3228 BUG_ON(mle->type != DLM_MLE_BLOCK);
3229
3230 spin_lock(&mle->spinlock);
3231 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3232 if (bit != dead_node) {
3233 mlog(0, "mle found, but dead node %u would not have been "
3234 "master\n", dead_node);
3235 spin_unlock(&mle->spinlock);
3236 } else {
3237 /* Must drop the refcount by one since the assert_master will
3238 * never arrive. This may result in the mle being unlinked and
3239 * freed, but there may still be a process waiting in the
3240 * dlmlock path which is fine. */
3241 mlog(0, "node %u was expected master\n", dead_node);
3242 atomic_set(&mle->woken, 1);
3243 spin_unlock(&mle->spinlock);
3244 wake_up(&mle->wq);
3245
3246 /* Do not need events any longer, so detach from heartbeat */
3247 __dlm_mle_detach_hb_events(dlm, mle);
3248 __dlm_put_mle(mle);
3249 }
3250 }
3251
3252 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3253 {
3254 struct dlm_master_list_entry *mle;
3255 struct dlm_lock_resource *res;
3256 struct hlist_head *bucket;
3257 struct hlist_node *list;
3258 unsigned int i;
3259
3260 mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
3261 top:
3262 assert_spin_locked(&dlm->spinlock);
3263
3264 /* clean the master list */
3265 spin_lock(&dlm->master_lock);
3266 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3267 bucket = dlm_master_hash(dlm, i);
3268 hlist_for_each(list, bucket) {
3269 mle = hlist_entry(list, struct dlm_master_list_entry,
3270 master_hash_node);
3271
3272 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3273 mle->type != DLM_MLE_MASTER &&
3274 mle->type != DLM_MLE_MIGRATION);
3275
3276 /* MASTER mles are initiated locally. The waiting
3277 * process will notice the node map change shortly.
3278 * Let that happen as normal. */
3279 if (mle->type == DLM_MLE_MASTER)
3280 continue;
3281
3282 /* BLOCK mles are initiated by other nodes. Need to
3283 * clean up if the dead node would have been the
3284 * master. */
3285 if (mle->type == DLM_MLE_BLOCK) {
3286 dlm_clean_block_mle(dlm, mle, dead_node);
3287 continue;
3288 }
3289
3290 /* Everything else is a MIGRATION mle */
3291
3292 /* The rule for MIGRATION mles is that the master
3293 * becomes UNKNOWN if *either* the original or the new
3294 * master dies. All UNKNOWN lockres are sent to
3295 * whichever node becomes the recovery master. The new
3296 * master is responsible for determining if there is
3297 * still a master for this lockres, or if he needs to
3298 * take over mastery. Either way, this node should
3299 * expect another message to resolve this. */
3300
3301 if (mle->master != dead_node &&
3302 mle->new_master != dead_node)
3303 continue;
3304
3305 /* If we have reached this point, this mle needs to be
3306 * removed from the list and freed. */
3307 dlm_clean_migration_mle(dlm, mle);
3308
3309 mlog(0, "%s: node %u died during migration from "
3310 "%u to %u!\n", dlm->name, dead_node, mle->master,
3311 mle->new_master);
3312
3313 /* If we find a lockres associated with the mle, we've
3314 * hit this rare case that messes up our lock ordering.
3315 * If so, we need to drop the master lock so that we can
3316 * take the lockres lock, meaning that we will have to
3317 * restart from the head of list. */
3318 res = dlm_reset_mleres_owner(dlm, mle);
3319 if (res)
3320 /* restart */
3321 goto top;
3322
3323 /* This may be the last reference */
3324 __dlm_put_mle(mle);
3325 }
3326 }
3327 spin_unlock(&dlm->master_lock);
3328 }
3329
3330 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3331 u8 old_master)
3332 {
3333 struct dlm_node_iter iter;
3334 int ret = 0;
3335
3336 spin_lock(&dlm->spinlock);
3337 dlm_node_iter_init(dlm->domain_map, &iter);
3338 clear_bit(old_master, iter.node_map);
3339 clear_bit(dlm->node_num, iter.node_map);
3340 spin_unlock(&dlm->spinlock);
3341
3342 /* ownership of the lockres is changing. account for the
3343 * mastery reference here since old_master will briefly have
3344 * a reference after the migration completes */
3345 spin_lock(&res->spinlock);
3346 dlm_lockres_set_refmap_bit(old_master, res);
3347 spin_unlock(&res->spinlock);
3348
3349 mlog(0, "now time to do a migrate request to other nodes\n");
3350 ret = dlm_do_migrate_request(dlm, res, old_master,
3351 dlm->node_num, &iter);
3352 if (ret < 0) {
3353 mlog_errno(ret);
3354 goto leave;
3355 }
3356
3357 mlog(0, "doing assert master of %.*s to all except the original node\n",
3358 res->lockname.len, res->lockname.name);
3359 /* this call now finishes out the nodemap
3360 * even if one or more nodes die */
3361 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3362 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3363 if (ret < 0) {
3364 /* no longer need to retry. all living nodes contacted. */
3365 mlog_errno(ret);
3366 ret = 0;
3367 }
3368
3369 memset(iter.node_map, 0, sizeof(iter.node_map));
3370 set_bit(old_master, iter.node_map);
3371 mlog(0, "doing assert master of %.*s back to %u\n",
3372 res->lockname.len, res->lockname.name, old_master);
3373 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3374 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3375 if (ret < 0) {
3376 mlog(0, "assert master to original master failed "
3377 "with %d.\n", ret);
3378 /* the only nonzero status here would be because of
3379 * a dead original node. we're done. */
3380 ret = 0;
3381 }
3382
3383 /* all done, set the owner, clear the flag */
3384 spin_lock(&res->spinlock);
3385 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3386 res->state &= ~DLM_LOCK_RES_MIGRATING;
3387 spin_unlock(&res->spinlock);
3388 /* re-dirty it on the new master */
3389 dlm_kick_thread(dlm, res);
3390 wake_up(&res->wq);
3391 leave:
3392 return ret;
3393 }
3394
3395 /*
3396 * LOCKRES AST REFCOUNT
3397 * this is integral to migration
3398 */
3399
3400 /* for future intent to call an ast, reserve one ahead of time.
3401 * this should be called only after waiting on the lockres
3402 * with dlm_wait_on_lockres, and while still holding the
3403 * spinlock after the call. */
3404 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3405 {
3406 assert_spin_locked(&res->spinlock);
3407 if (res->state & DLM_LOCK_RES_MIGRATING) {
3408 __dlm_print_one_lock_resource(res);
3409 }
3410 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3411
3412 atomic_inc(&res->asts_reserved);
3413 }
3414
3415 /*
3416 * used to drop the reserved ast, either because it went unused,
3417 * or because the ast/bast was actually called.
3418 *
3419 * also, if there is a pending migration on this lockres,
3420 * and this was the last pending ast on the lockres,
3421 * atomically set the MIGRATING flag before we drop the lock.
3422 * this is how we ensure that migration can proceed with no
3423 * asts in progress. note that it is ok if the state of the
3424 * queues is such that a lock should be granted in the future
3425 * or that a bast should be fired, because the new master will
3426 * shuffle the lists on this lockres as soon as it is migrated.
3427 */
3428 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3429 struct dlm_lock_resource *res)
3430 {
3431 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3432 return;
3433
3434 if (!res->migration_pending) {
3435 spin_unlock(&res->spinlock);
3436 return;
3437 }
3438
3439 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3440 res->migration_pending = 0;
3441 res->state |= DLM_LOCK_RES_MIGRATING;
3442 spin_unlock(&res->spinlock);
3443 wake_up(&res->wq);
3444 wake_up(&dlm->migration_wq);
3445 }
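
/*
 * The reserve/release pair above is the atomic_dec_and_lock() idiom:
 * only the release that drops asts_reserved to zero takes
 * res->spinlock, so the pending-migration check and the MIGRATING
 * transition happen exactly once.  Generic shape of the idiom, as a
 * sketch:
 */
static inline int dlm_example_last_put(atomic_t *count, spinlock_t *lock)
{
	/* returns nonzero, with the lock held, only on the 1 -> 0 drop */
	if (atomic_dec_and_lock(count, lock)) {
		/* ... do last-put cleanup under the lock ... */
		spin_unlock(lock);
		return 1;
	}
	return 0;
}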
3446
3447 void dlm_force_free_mles(struct dlm_ctxt *dlm)
3448 {
3449 int i;
3450 struct hlist_head *bucket;
3451 struct dlm_master_list_entry *mle;
3452 struct hlist_node *tmp, *list;
3453
3454 /*
3455 * We notified all other nodes that we are exiting the domain and
3456 * marked the dlm state as DLM_CTXT_LEAVING. If any mles are still
3457 * around we force free them and wake any processes that are waiting
3458 * on the mles
3459 */
3460 spin_lock(&dlm->spinlock);
3461 spin_lock(&dlm->master_lock);
3462
3463 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3464 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3465
3466 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3467 bucket = dlm_master_hash(dlm, i);
3468 hlist_for_each_safe(list, tmp, bucket) {
3469 mle = hlist_entry(list, struct dlm_master_list_entry,
3470 master_hash_node);
3471 if (mle->type != DLM_MLE_BLOCK) {
3472 mlog(ML_ERROR, "bad mle: %p\n", mle);
3473 dlm_print_one_mle(mle);
3474 }
3475 atomic_set(&mle->woken, 1);
3476 wake_up(&mle->wq);
3477
3478 __dlm_unlink_mle(dlm, mle);
3479 __dlm_mle_detach_hb_events(dlm, mle);
3480 __dlm_put_mle(mle);
3481 }
3482 }
3483 spin_unlock(&dlm->master_lock);
3484 spin_unlock(&dlm->spinlock);
3485 }