import json
import logging
from unittest import SkipTest

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from teuthology import misc as teuthology
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)


class TestFailover(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
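        # mds_beacon_grace is how long the mons will go without hearing a
        # beacon from an MDS before marking it laggy and promoting a standby,
        # so grace*2 below is a comfortable upper bound for the failover.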

        # Wait until the monitor promotes his replacement
        def promoted():
            active = self.fs.get_active_names()
            return active and active[0] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(
            promoted,
            timeout=grace*2)

        # Start the original rank 0 daemon up again, see that he becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            raise SkipTest("fuse_require_active_mds is not set")

        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))
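        # With fuse_require_active_mds set, ceph-fuse should refuse to mount
        # while no MDS rank is active rather than hanging; that behaviour is
        # exercised at the end of this test.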

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.mon_manager.get_mds_status(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount normally while the cluster is healthy
        self.mounts[0].mount()
        self.mounts[0].wait_until_mounted()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, grace * 2)
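        # Now that every MDS is laggy, the mount attempt should error out
        # rather than hang, surfacing as a CommandFailedError from ceph-fuse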
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated when insufficient standbys are available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
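        # standby_count_wanted is a per-filesystem setting: if fewer standby
        # daemons are available than wanted, the cluster raises the
        # "insufficient standby daemons available" health warning that the
        # checks below wait for.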

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("insufficient standby daemons available", grace*2)

        # Restart the standby, see that he becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set standby_count_wanted to one more than the number of standbys we have
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("insufficient standby daemons available", grace*2)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)


class TestStandbyReplay(CephFSTestCase):
    MDSS_REQUIRED = 4
    REQUIRE_FILESYSTEM = False

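    # Note on the configs used below: mds_standby_for_name restricts a daemon
    # to standing by for the named MDS, and mds_standby_replay additionally
    # makes it tail that MDS's journal (up:standby-replay) instead of sitting
    # idle in up:standby.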
    def set_standby_for(self, leader, follower, replay):
        self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
        if replay:
            self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")

    def get_info_by_name(self, mds_name):
        status = self.mds_cluster.status()
        info = status.get_mds(mds_name)
        if info is None:
            log.warning(str(status))
            raise RuntimeError("MDS '{0}' not found".format(mds_name))
        else:
            return info

    def test_standby_replay_unused(self):
        # Pick out exactly 3 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:3])
        mds_a, mds_b, mds_c = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        # B and C should both follow A, but only one will
        # really get into standby replay state.
        self.set_standby_for(mds_a, mds_b, True)
        self.set_standby_for(mds_a, mds_c, True)

        # Create FS and start A
        fs_a = self.mds_cluster.newfs("alpha")
        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        # Start B, he should go into standby replay
        self.mds_cluster.mds_restart(mds_b)
        self.wait_for_daemon_start([mds_b])
        info_b = self.get_info_by_name(mds_b)
        self.assertEqual(info_b['state'], "up:standby-replay")
        self.assertEqual(info_b['standby_for_name'], mds_a)
        self.assertEqual(info_b['rank'], 0)
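        # Only one follower is granted standby-replay for a given rank, so C,
        # which asked for the same thing, should be left as a plain standby.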

        # Start C, he should go into standby (*not* replay)
        self.mds_cluster.mds_restart(mds_c)
        self.wait_for_daemon_start([mds_c])
        info_c = self.get_info_by_name(mds_c)
        self.assertEqual(info_c['state'], "up:standby")
        self.assertEqual(info_c['standby_for_name'], mds_a)
        self.assertEqual(info_c['rank'], -1)

        # Kill B, C should go into standby replay
        self.mds_cluster.mds_stop(mds_b)
        self.mds_cluster.mds_fail(mds_b)
        self.wait_until_equal(
            lambda: self.get_info_by_name(mds_c)['state'],
            "up:standby-replay",
            60)
        info_c = self.get_info_by_name(mds_c)
        self.assertEqual(info_c['state'], "up:standby-replay")
        self.assertEqual(info_c['standby_for_name'], mds_a)
        self.assertEqual(info_c['rank'], 0)

    def test_standby_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """
        # Pick out exactly 2 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:2])
        mds_a, mds_b = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        # Configure the two MDSs to be standby for each other, with B
        # additionally doing standby-replay for A
        self.set_standby_for(mds_a, mds_b, True)
        self.set_standby_for(mds_b, mds_a, False)

        # Create FS alpha and get mds_a to come up as active
        fs_a = self.mds_cluster.newfs("alpha")
        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        # Start the standby
        self.mds_cluster.mds_restart(mds_b)
        self.wait_for_daemon_start([mds_b])

        # See the standby come up as standby-replay for the correct rank
        info_b = self.get_info_by_name(mds_b)
        self.assertEqual(info_b['state'], "up:standby-replay")
        self.assertEqual(info_b['standby_for_name'], mds_a)
        self.assertEqual(info_b['rank'], 0)

        # Kill the standby
        self.mds_cluster.mds_stop(mds_b)
        self.mds_cluster.mds_fail(mds_b)

        # See that the standby is gone and the active remains
        self.assertEqual(fs_a.get_active_names(), [mds_a])
        mds_map = fs_a.get_mds_map()
        self.assertEqual(len(mds_map['info']), 1)
        self.assertEqual(mds_map['failed'], [])
        self.assertEqual(mds_map['damaged'], [])
        self.assertEqual(mds_map['stopped'], [])

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down
        """
        # Pick out exactly 4 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_a_s, mds_b_s = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        # a and b both get a standby
        self.set_standby_for(mds_a, mds_a_s, True)
        self.set_standby_for(mds_b, mds_b_s, True)

        # Create FS alpha and get mds_a to come up as active
        fs_a = self.mds_cluster.newfs("alpha")
        fs_a.set_allow_multimds(True)
        fs_a.set_max_mds(2)
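        # (on releases where multiple active MDSs are gated behind a flag,
        # allow_multimds must be enabled before max_mds may exceed 1; with
        # max_mds=2 the filesystem will want two active ranks)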

        self.mds_cluster.mds_restart(mds_a)
        self.wait_until_equal(lambda: fs_a.get_active_names(), [mds_a], 30)
        self.mds_cluster.mds_restart(mds_b)
        fs_a.wait_for_daemons()
        self.assertEqual(sorted(fs_a.get_active_names()), [mds_a, mds_b])

        # Start the standbys
        self.mds_cluster.mds_restart(mds_b_s)
        self.wait_for_daemon_start([mds_b_s])
        self.mds_cluster.mds_restart(mds_a_s)
        self.wait_for_daemon_start([mds_a_s])
        info_b_s = self.get_info_by_name(mds_b_s)
        self.assertEqual(info_b_s['state'], "up:standby-replay")
        info_a_s = self.get_info_by_name(mds_a_s)
        self.assertEqual(info_a_s['state'], "up:standby-replay")

        # Shrink the cluster
        fs_a.set_max_mds(1)
        fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name))
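        # The old-style "mds stop" command asks rank 1 to wind down cleanly;
        # once the rank is gone, both the daemon that held it and its
        # standby-replay follower are expected to return to the standby pool.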
        self.wait_until_equal(
            lambda: fs_a.get_active_names(), [mds_a],
            60
        )

        # Both 'b' and 'b_s' should go back to being standbys
        self.wait_until_equal(
            lambda: self.mds_cluster.get_standby_daemons(), {mds_b, mds_b_s},
            60
        )


class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
                                                     "enable_multiple", "true",
                                                     "--yes-i-really-mean-it")
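        # (creating more than one filesystem is gated behind the
        # enable_multiple flag, which deliberately requires the
        # --yes-i-really-mean-it confirmation)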

    def _setup_two(self):
        fs_a = self.mds_cluster.newfs("alpha")
        fs_b = self.mds_cluster.newfs("bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
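        # so that each client may read and write the data pools of both
        # filesystems, whichever one it ends up mounting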
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount(mount_fs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount(mount_fs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))
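        # (data_objects_present looks for the inode's backing objects in the
        # filesystem's own data pool, so this confirms each client wrote
        # through to the pool of the filesystem it mounted)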

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        # Usual setup...
        fs_a, fs_b = self._setup_two()
        fs_a.set_allow_multimds(True)
        fs_b.set_allow_multimds(True)

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        fs_b.deactivate(1)
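        # (on these releases lowering max_mds does not remove the extra rank
        # by itself; deactivate(1) explicitly asks rank 1 to stop, after
        # which its daemon should drop back to standby)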
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)

    def test_standby_for_name(self):
        # Pick out exactly 4 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_c, mds_d = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for(leader, follower, replay):
            self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
            if replay:
                self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")

        # Configure two pairs of MDSs that are standby for each other
        set_standby_for(mds_a, mds_b, True)
        set_standby_for(mds_b, mds_a, False)
        set_standby_for(mds_c, mds_d, True)
        set_standby_for(mds_d, mds_c, False)

        # Create FS alpha and get mds_a to come up as active
        fs_a = self.mds_cluster.newfs("alpha")
        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        # Create FS bravo and get mds_c to come up as active
        fs_b = self.mds_cluster.newfs("bravo")
        self.mds_cluster.mds_restart(mds_c)
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_c])

        # Start the standbys
        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_d)
        self.wait_for_daemon_start([mds_b, mds_d])

        def get_info_by_name(fs, mds_name):
            mds_map = fs.get_mds_map()
            for gid_str, info in mds_map['info'].items():
                if info['name'] == mds_name:
                    return info

            log.warning(json.dumps(mds_map, indent=2))
            raise RuntimeError("MDS '{0}' not found in filesystem MDSMap".format(mds_name))

        # See both standbys come up as standby replay for the correct ranks
        # mds_b should be in filesystem alpha following mds_a
        info_b = get_info_by_name(fs_a, mds_b)
        self.assertEqual(info_b['state'], "up:standby-replay")
        self.assertEqual(info_b['standby_for_name'], mds_a)
        self.assertEqual(info_b['rank'], 0)
        # mds_d should be in filesystem bravo following mds_c
        info_d = get_info_by_name(fs_b, mds_d)
        self.assertEqual(info_d['state'], "up:standby-replay")
        self.assertEqual(info_d['standby_for_name'], mds_c)
        self.assertEqual(info_d['rank'], 0)

        # Kill both active daemons
        self.mds_cluster.mds_stop(mds_a)
        self.mds_cluster.mds_fail(mds_a)
        self.mds_cluster.mds_stop(mds_c)
        self.mds_cluster.mds_fail(mds_c)

        # Wait for standbys to take over
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_b])
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_d])

        # Start the original active daemons up again
        self.mds_cluster.mds_restart(mds_a)
        self.mds_cluster.mds_restart(mds_c)
        self.wait_for_daemon_start([mds_a, mds_c])

        self.assertEqual(set(self.mds_cluster.get_standby_daemons()),
                         {mds_a, mds_c})

    def test_standby_for_rank(self):
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_c, mds_d = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

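        # Pin each follower to a specific rank of a specific filesystem:
        # mds_standby_for_rank selects the rank and mds_standby_for_fscid
        # scopes it to that filesystem's ID.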
        def set_standby_for(leader_rank, leader_fs, follower_id):
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_rank", leader_rank)

            fscid = leader_fs.get_namespace_id()
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_fscid", fscid)

        fs_a = self.mds_cluster.newfs("alpha")
        fs_b = self.mds_cluster.newfs("bravo")
        set_standby_for(0, fs_a, mds_a)
        set_standby_for(0, fs_a, mds_b)
        set_standby_for(0, fs_b, mds_c)
        set_standby_for(0, fs_b, mds_d)

        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        self.mds_cluster.mds_restart(mds_c)
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_c])

        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_d)
        self.wait_for_daemon_start([mds_b, mds_d])

        self.mds_cluster.mds_stop(mds_a)
        self.mds_cluster.mds_fail(mds_a)
        self.mds_cluster.mds_stop(mds_c)
        self.mds_cluster.mds_fail(mds_c)

        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_b])
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_d])

    def test_standby_for_fscid(self):
        """
        That I can set a standby FSCID with no rank, and the result is
        that daemons join any rank for that filesystem.
        """
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_c, mds_d = use_daemons

        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for(leader_fs, follower_id):
            fscid = leader_fs.get_namespace_id()
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_fscid", fscid)

        # Create two filesystems which should have two ranks each
        fs_a = self.mds_cluster.newfs("alpha")
        fs_a.set_allow_multimds(True)

        fs_b = self.mds_cluster.newfs("bravo")
        fs_b.set_allow_multimds(True)

        fs_a.set_max_mds(2)
        fs_b.set_max_mds(2)

        # Set all the daemons to have a FSCID assignment but no other
        # standby preferences.
        set_standby_for(fs_a, mds_a)
        set_standby_for(fs_a, mds_b)
        set_standby_for(fs_b, mds_c)
        set_standby_for(fs_b, mds_d)

        # Now when we start all daemons at once, they should fall into
        # ranks in the right filesystem
        self.mds_cluster.mds_restart(mds_a)
        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_c)
        self.mds_cluster.mds_restart(mds_d)
        self.wait_for_daemon_start([mds_a, mds_b, mds_c, mds_d])
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()
        self.assertEqual(set(fs_a.get_active_names()), {mds_a, mds_b})
        self.assertEqual(set(fs_b.get_active_names()), {mds_c, mds_d})

    def test_standby_for_invalid_fscid(self):
        """
        That an invalid standby_fscid does not cause a mon crash
        """
        use_daemons = sorted(self.mds_cluster.mds_ids[0:3])
        mds_a, mds_b, mds_c = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for_rank(leader_rank, follower_id):
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_rank", leader_rank)

        # Create one fs
        fs_a = self.mds_cluster.newfs("cephfs")

        # Get configured mons in the cluster, so we can see if any
        # crashed later.
        configured_mons = fs_a.mon_manager.get_mon_quorum()

        # Set all the daemons to have a rank assignment but no other
        # standby preferences.
        set_standby_for_rank(0, mds_a)
        set_standby_for_rank(0, mds_b)

        # Set third daemon to have invalid fscid assignment and no other
        # standby preferences
        invalid_fscid = 123
        self.set_conf("mds.{0}".format(mds_c), "mds_standby_for_fscid", invalid_fscid)
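        # (the mons are expected to tolerate a standby_for_fscid that matches
        # no filesystem; the quorum comparison at the end of the test is what
        # would catch a crashed mon)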

        # Restart all the daemons so the standby preferences take effect
        self.mds_cluster.mds_restart(mds_a)
        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_c)
        self.wait_for_daemon_start([mds_a, mds_b, mds_c])

        # Stop the active MDS daemon of the filesystem
        if fs_a.get_active_names() == [mds_a]:
            self.mds_cluster.mds_stop(mds_a)
            self.mds_cluster.mds_fail(mds_a)
            fs_a.wait_for_daemons()
        else:
            self.mds_cluster.mds_stop(mds_b)
            self.mds_cluster.mds_fail(mds_b)
            fs_a.wait_for_daemons()

        # Get the mons now in quorum
        active_mons = fs_a.mon_manager.get_mon_quorum()

        # Check that the quorum is unchanged, i.e. that no mon crashed
        self.assertEqual(active_mons, configured_mons,
                         "Not all mons are in quorum, invalid standby fscid test failed!")