]>
git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_failover.py
faefec458d6107bece8dca1bad283e149e13035a
# NOTE(review): reconstructed from a garbled web-scrape capture. The capture's
# embedded numbering starts at line 3, and `logging` / `json` are used below
# (log = logging.getLogger, json.dumps) without a surviving import, so the two
# dropped leading lines were almost certainly these stdlib imports.
import json
import logging

from unittest import case, SkipTest

from cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from teuthology import misc as teuthology
from tasks.cephfs.fuse_mount import FuseMount

# Module-level logger, named after this module per the standard convention.
log = logging.getLogger(__name__)
class TestFailover(CephFSTestCase):
    """
    Exercise MDS failover: standby promotion after the active daemon dies,
    client behaviour when no active MDS is available, and the
    standby_count_wanted health warnings.

    NOTE(review): this block was reconstructed from a garbled scrape; lines
    dropped by the capture (function headers, wait_until_true call frames,
    class attributes) were restored from context — verify against upstream.
    """
    # NOTE(review): class attributes were not visible in the capture; these
    # are the conventional requirements for this suite — confirm.
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Wait until the monitor promotes his replacement
        def promoted():
            active = self.fs.get_active_names()
            return active and active[0] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(
            promoted,
            timeout=grace * 2)

        # Start the original rank 0 daemon up again, see that he becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            raise case.SkipTest("fuse_require_active_mds is not set")

        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.mon_manager.get_mds_status(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount usually, while the cluster is healthy
        self.mounts[0].mount()
        self.mounts[0].wait_until_mounted()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        # NOTE(review): the capture dropped the stop call itself; stopping all
        # MDS daemons is what the subsequent "everyone goes laggy" wait implies.
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, grace * 2)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated by insufficient standbys available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        grace = int(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("insufficient standby daemons available", grace * 2)

        # restart the standby, see that he becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys) + 1))
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("insufficient standby daemons available", grace * 2)

        # Set it to 0, health warning should clear
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)
class TestStandbyReplay(CephFSTestCase):
    """
    Exercise standby-replay configuration: which daemon wins the
    standby-replay slot, clean failure of a standby-replay daemon, and
    teardown of standby-replays when their rank is stopped.

    NOTE(review): reconstructed from a garbled scrape; dropped lines
    (`if replay:`, wait_until_equal arguments, set_max_mds calls) were
    restored from context — verify against upstream.
    """
    # NOTE(review): MDSS_REQUIRED was not visible in the capture; four daemons
    # are needed by test_rank_stopped — confirm.
    MDSS_REQUIRED = 4
    REQUIRE_FILESYSTEM = False

    def set_standby_for(self, leader, follower, replay):
        # Point `follower` at `leader`; optionally ask it to be a replay standby.
        self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
        if replay:
            self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")

    def get_info_by_name(self, mds_name):
        # Look up a daemon's info dict in the cluster status; raise (with the
        # full status logged for diagnosis) if it is not present.
        status = self.mds_cluster.status()
        info = status.get_mds(mds_name)
        if info is None:
            # log.warn is a deprecated alias; use the canonical name.
            log.warning(str(status))
            raise RuntimeError("MDS '{0}' not found".format(mds_name))
        else:
            return info

    def test_standby_replay_unused(self):
        # Pick out exactly 3 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:3])
        mds_a, mds_b, mds_c = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        # B and C should both follow A, but only one will
        # really get into standby replay state.
        self.set_standby_for(mds_a, mds_b, True)
        self.set_standby_for(mds_a, mds_c, True)

        # Create FS and start A
        fs_a = self.mds_cluster.newfs("alpha")
        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        # Start B, he should go into standby replay
        self.mds_cluster.mds_restart(mds_b)
        self.wait_for_daemon_start([mds_b])
        info_b = self.get_info_by_name(mds_b)
        self.assertEqual(info_b['state'], "up:standby-replay")
        self.assertEqual(info_b['standby_for_name'], mds_a)
        self.assertEqual(info_b['rank'], 0)

        # Start C, he should go into standby (*not* replay)
        self.mds_cluster.mds_restart(mds_c)
        self.wait_for_daemon_start([mds_c])
        info_c = self.get_info_by_name(mds_c)
        self.assertEqual(info_c['state'], "up:standby")
        self.assertEqual(info_c['standby_for_name'], mds_a)
        self.assertEqual(info_c['rank'], -1)

        # Kill B, C should go into standby replay
        self.mds_cluster.mds_stop(mds_b)
        self.mds_cluster.mds_fail(mds_b)
        self.wait_until_equal(
            lambda: self.get_info_by_name(mds_c)['state'],
            "up:standby-replay",
            60)
        info_c = self.get_info_by_name(mds_c)
        self.assertEqual(info_c['state'], "up:standby-replay")
        self.assertEqual(info_c['standby_for_name'], mds_a)
        self.assertEqual(info_c['rank'], 0)

    def test_standby_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """
        # Pick out exactly 2 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:2])
        mds_a, mds_b = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        # Configure the two daemons as a mutual standby pair
        # (b replays for a; a is a plain standby for b)
        self.set_standby_for(mds_a, mds_b, True)
        self.set_standby_for(mds_b, mds_a, False)

        # Create FS alpha and get mds_a to come up as active
        fs_a = self.mds_cluster.newfs("alpha")
        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        # Start the standby
        self.mds_cluster.mds_restart(mds_b)
        self.wait_for_daemon_start([mds_b])

        # See the standby come up as the correct rank
        info_b = self.get_info_by_name(mds_b)
        self.assertEqual(info_b['state'], "up:standby-replay")
        self.assertEqual(info_b['standby_for_name'], mds_a)
        self.assertEqual(info_b['rank'], 0)

        # Kill the standby
        self.mds_cluster.mds_stop(mds_b)
        self.mds_cluster.mds_fail(mds_b)

        # See that the standby is gone and the active remains
        self.assertEqual(fs_a.get_active_names(), [mds_a])
        mds_map = fs_a.get_mds_map()
        self.assertEqual(len(mds_map['info']), 1)
        self.assertEqual(mds_map['failed'], [])
        self.assertEqual(mds_map['damaged'], [])
        self.assertEqual(mds_map['stopped'], [])

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down
        """
        # Pick out exactly 4 daemons to be run during test
        # (original comment said 2, but four are unpacked below)
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_a_s, mds_b_s = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        # a and b both get a standby
        self.set_standby_for(mds_a, mds_a_s, True)
        self.set_standby_for(mds_b, mds_b_s, True)

        # Create FS alpha and get mds_a to come up as active
        fs_a = self.mds_cluster.newfs("alpha")
        fs_a.set_allow_multimds(True)
        # NOTE(review): the capture dropped the max_mds bump; two active ranks
        # are clearly expected by the assertions below — confirm.
        fs_a.set_max_mds(2)

        self.mds_cluster.mds_restart(mds_a)
        self.wait_until_equal(lambda: fs_a.get_active_names(), [mds_a], 30)
        self.mds_cluster.mds_restart(mds_b)
        fs_a.wait_for_daemons()
        self.assertEqual(sorted(fs_a.get_active_names()), [mds_a, mds_b])

        # Start the standbys
        self.mds_cluster.mds_restart(mds_b_s)
        self.wait_for_daemon_start([mds_b_s])
        self.mds_cluster.mds_restart(mds_a_s)
        self.wait_for_daemon_start([mds_a_s])
        info_b_s = self.get_info_by_name(mds_b_s)
        self.assertEqual(info_b_s['state'], "up:standby-replay")
        info_a_s = self.get_info_by_name(mds_a_s)
        self.assertEqual(info_a_s['state'], "up:standby-replay")

        # Shrink the cluster: stop rank 1 and watch it wind down
        fs_a.set_max_mds(1)
        fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name))
        self.wait_until_equal(
            lambda: fs_a.get_active_names(), [mds_a],
            60)

        # Both 'b' and 'b_s' should go back to being standbys
        self.wait_until_equal(
            lambda: self.mds_cluster.get_standby_daemons(), {mds_b, mds_b_s},
            60)
class TestMultiFilesystems(CephFSTestCase):
    """
    Exercise running multiple filesystems in one cluster: client routing,
    standby takeover per-filesystem, growing/shrinking rank counts, and
    standby preferences by name / rank / fscid.

    NOTE(review): reconstructed from a garbled scrape; dropped lines
    (`return fs_a, fs_b`, `invalid_fscid = 123`, set_max_mds calls, auth cap
    arguments) were restored from context — verify against upstream.
    """
    # NOTE(review): these two attributes were not visible in the capture;
    # two clients and four MDSs are what the tests below consume — confirm.
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        # Multiple filesystems are behind a feature flag; enable it up front.
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
            "enable_multiple", "true",
            "--yes-i-really-mean-it")

    def _setup_two(self):
        """Create filesystems alpha and bravo, start all daemons, wait for
        health, and grant every client access to both data pools.
        Returns (fs_a, fs_b)."""
        fs_a = self.mds_cluster.newfs("alpha")
        fs_b = self.mds_cluster.newfs("bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount(mount_fs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount(mount_fs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) |
                                                                  set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        # (original comment said fs_a: copy-paste slip)
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30)

    def test_grow_shrink(self):
        # Usual setup: two filesystems, one active daemon each
        fs_a, fs_b = self._setup_two()
        fs_a.set_allow_multimds(True)
        fs_b.set_allow_multimds(True)

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        # NOTE(review): shrink lines were dropped by the capture; restored
        # from the comment's intent — confirm against upstream.
        fs_b.set_max_mds(1)
        fs_b.deactivate(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)

    def test_standby_for_name(self):
        # Pick out exactly 4 daemons to be run during test
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_c, mds_d = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for(leader, follower, replay):
            self.set_conf("mds.{0}".format(follower), "mds_standby_for_name", leader)
            if replay:
                self.set_conf("mds.{0}".format(follower), "mds_standby_replay", "true")

        # Configure two pairs of MDSs that are standby for each other
        set_standby_for(mds_a, mds_b, True)
        set_standby_for(mds_b, mds_a, False)
        set_standby_for(mds_c, mds_d, True)
        set_standby_for(mds_d, mds_c, False)

        # Create FS alpha and get mds_a to come up as active
        fs_a = self.mds_cluster.newfs("alpha")
        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        # Create FS bravo and get mds_c to come up as active
        fs_b = self.mds_cluster.newfs("bravo")
        self.mds_cluster.mds_restart(mds_c)
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_c])

        # Start the standbys
        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_d)
        self.wait_for_daemon_start([mds_b, mds_d])

        def get_info_by_name(fs, mds_name):
            # The gid key is unused, so iterate values only.
            mds_map = fs.get_mds_map()
            for info in mds_map['info'].values():
                if info['name'] == mds_name:
                    return info

            log.warning(json.dumps(mds_map, indent=2))
            raise RuntimeError("MDS '{0}' not found in filesystem MDSMap".format(mds_name))

        # See both standbys come up as standby replay for the correct ranks
        # mds_b should be in filesystem alpha following mds_a
        info_b = get_info_by_name(fs_a, mds_b)
        self.assertEqual(info_b['state'], "up:standby-replay")
        self.assertEqual(info_b['standby_for_name'], mds_a)
        self.assertEqual(info_b['rank'], 0)
        # mds_d should be in filesystem bravo following mds_c
        # (original comment said alpha: copy-paste slip)
        info_d = get_info_by_name(fs_b, mds_d)
        self.assertEqual(info_d['state'], "up:standby-replay")
        self.assertEqual(info_d['standby_for_name'], mds_c)
        self.assertEqual(info_d['rank'], 0)

        # Kill both active daemons
        self.mds_cluster.mds_stop(mds_a)
        self.mds_cluster.mds_fail(mds_a)
        self.mds_cluster.mds_stop(mds_c)
        self.mds_cluster.mds_fail(mds_c)

        # Wait for standbys to take over
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_b])
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_d])

        # Start the original active daemons up again
        self.mds_cluster.mds_restart(mds_a)
        self.mds_cluster.mds_restart(mds_c)
        self.wait_for_daemon_start([mds_a, mds_c])

        self.assertEqual(set(self.mds_cluster.get_standby_daemons()),
                         {mds_a, mds_c})

    def test_standby_for_rank(self):
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_c, mds_d = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for(leader_rank, leader_fs, follower_id):
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_rank", leader_rank)

            fscid = leader_fs.get_namespace_id()
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_fscid", fscid)

        fs_a = self.mds_cluster.newfs("alpha")
        fs_b = self.mds_cluster.newfs("bravo")
        set_standby_for(0, fs_a, mds_a)
        set_standby_for(0, fs_a, mds_b)
        set_standby_for(0, fs_b, mds_c)
        set_standby_for(0, fs_b, mds_d)

        self.mds_cluster.mds_restart(mds_a)
        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_a])

        self.mds_cluster.mds_restart(mds_c)
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_c])

        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_d)
        self.wait_for_daemon_start([mds_b, mds_d])

        self.mds_cluster.mds_stop(mds_a)
        self.mds_cluster.mds_fail(mds_a)
        self.mds_cluster.mds_stop(mds_c)
        self.mds_cluster.mds_fail(mds_c)

        fs_a.wait_for_daemons()
        self.assertEqual(fs_a.get_active_names(), [mds_b])
        fs_b.wait_for_daemons()
        self.assertEqual(fs_b.get_active_names(), [mds_d])

    def test_standby_for_fscid(self):
        """
        That I can set a standby FSCID with no rank, and the result is
        that daemons join any rank for that filesystem.
        """
        use_daemons = sorted(self.mds_cluster.mds_ids[0:4])
        mds_a, mds_b, mds_c, mds_d = use_daemons

        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for(leader_fs, follower_id):
            fscid = leader_fs.get_namespace_id()
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_fscid", fscid)

        # Create two filesystems which should have two ranks each
        # NOTE(review): the max_mds bumps were dropped by the capture;
        # restored from the comment's "two ranks each" — confirm.
        fs_a = self.mds_cluster.newfs("alpha")
        fs_a.set_allow_multimds(True)
        fs_a.set_max_mds(2)

        fs_b = self.mds_cluster.newfs("bravo")
        fs_b.set_allow_multimds(True)
        fs_b.set_max_mds(2)

        # Set all the daemons to have a FSCID assignment but no other
        # standby preferences.
        set_standby_for(fs_a, mds_a)
        set_standby_for(fs_a, mds_b)
        set_standby_for(fs_b, mds_c)
        set_standby_for(fs_b, mds_d)

        # Now when we start all daemons at once, they should fall into
        # ranks in the right filesystem
        self.mds_cluster.mds_restart(mds_a)
        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_c)
        self.mds_cluster.mds_restart(mds_d)
        self.wait_for_daemon_start([mds_a, mds_b, mds_c, mds_d])
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()
        self.assertEqual(set(fs_a.get_active_names()), {mds_a, mds_b})
        self.assertEqual(set(fs_b.get_active_names()), {mds_c, mds_d})

    def test_standby_for_invalid_fscid(self):
        """
        That an invalid standby_fscid does not cause a mon crash
        """
        use_daemons = sorted(self.mds_cluster.mds_ids[0:3])
        mds_a, mds_b, mds_c = use_daemons
        log.info("Using MDS daemons: {0}".format(use_daemons))

        def set_standby_for_rank(leader_rank, follower_id):
            self.set_conf("mds.{0}".format(follower_id),
                          "mds_standby_for_rank", leader_rank)

        # Create one fs
        fs_a = self.mds_cluster.newfs("cephfs")

        # Get configured mons in the cluster, so we can see if any
        # crashed later in the test
        configured_mons = fs_a.mon_manager.get_mon_quorum()

        # Set all the daemons to have a rank assignment but no other
        # standby preferences.
        set_standby_for_rank(0, mds_a)
        set_standby_for_rank(0, mds_b)

        # Set third daemon to have invalid fscid assignment and no other
        # standby preferences
        # NOTE(review): the literal was dropped by the capture; any fscid that
        # does not belong to an existing filesystem exercises the same path.
        invalid_fscid = 123
        self.set_conf("mds.{0}".format(mds_c), "mds_standby_for_fscid", invalid_fscid)

        # Restart all the daemons to make the standby preference applied
        self.mds_cluster.mds_restart(mds_a)
        self.mds_cluster.mds_restart(mds_b)
        self.mds_cluster.mds_restart(mds_c)
        self.wait_for_daemon_start([mds_a, mds_b, mds_c])

        # Stop active mds daemon service of fs
        # BUG FIX: the original wrote `if (fs_a.get_active_names(), [mds_a]):`,
        # which builds a two-element tuple that is always truthy, so the else
        # branch was unreachable.  Compare for equality as clearly intended.
        if fs_a.get_active_names() == [mds_a]:
            self.mds_cluster.mds_stop(mds_a)
            self.mds_cluster.mds_fail(mds_a)
            fs_a.wait_for_daemons()
        else:
            self.mds_cluster.mds_stop(mds_b)
            self.mds_cluster.mds_fail(mds_b)
            fs_a.wait_for_daemons()

        # Get active mons from cluster
        active_mons = fs_a.mon_manager.get_mon_quorum()

        # Check for active quorum mon status and configured mon status
        self.assertEqual(active_mons, configured_mons,
                         "Not all mons are in quorum Invalid standby invalid fscid test failed!")