git.proxmox.com Git - ceph.git/blob - ceph/qa/tasks/cephfs/test_failover.py
import time
import signal
import logging

from unittest import case, SkipTest
from random import randint

from cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from teuthology import misc as teuthology
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)
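
# Tests covering MDS cluster resizing (max_mds), failover and standby handling,
# standby-replay daemons, and running multiple filesystems side by side.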


class TestClusterResize(CephFSTestCase):

    # Helper: raise max_mds to n and wait until n ranks are active.
    def grow(self, n):
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        fscid = self.fs.id
        status = self.fs.status()
        log.info("status = {0}".format(status))

        original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        original_standbys = set([info['gid'] for info in status.get_standbys()])

        oldmax = self.fs.get_var('max_mds')
        self.assertTrue(n > oldmax)
        self.fs.set_max_mds(n)

        log.info("Waiting for cluster to grow.")
        status = self.fs.wait_for_daemons(timeout=60+grace*2)
        ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        self.assertTrue(original_ranks.issubset(ranks) and len(ranks) == n)

    # Helper: lower max_mds to n and wait until only n ranks remain active.
    def shrink(self, n):
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        fscid = self.fs.id
        status = self.fs.status()
        log.info("status = {0}".format(status))

        original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        original_standbys = set([info['gid'] for info in status.get_standbys()])

        oldmax = self.fs.get_var('max_mds')
        self.assertTrue(n < oldmax)
        self.fs.set_max_mds(n)

        # Wait until the monitor finishes stopping ranks >= n
        log.info("Waiting for cluster to shrink.")
        status = self.fs.wait_for_daemons(timeout=60+grace*2)
        ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        self.assertTrue(ranks.issubset(original_ranks) and len(ranks) == n)
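
    # Both helpers above wait 60 seconds plus two beacon grace periods so the
    # monitor has time to react to MDS state changes before the assertions run.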

    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?

    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.fs.set_max_mds(1)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.mount_a.umount_wait()
        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.mount_a.umount_wait()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.mount_a.umount_wait()
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.mount_a.umount_wait()
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.grow(2)

        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        self.fs.set_max_mds(3)
        # Don't wait for rank 1 to stop

        self.fs.set_max_mds(2)
        # Prevent another MDS from taking rank 1
        # XXX This is a little racy because rank 1 may have stopped and a
        # standby assigned to rank 1 before joinable=0 is set.
        self.fs.set_joinable(False)  # XXX keep in mind changing max_mds clears this flag

        try:
            status = self.fs.wait_for_daemons(timeout=90)
            raise RuntimeError("should not be able to successfully shrink cluster!")
        except RuntimeError:
            # could not shrink to max_mds=2 and reach 2 actives (because joinable=False)
            status = self.fs.status()
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertTrue(ranks == set([0]))
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2  # starting point; the formula below cycles max_mds through 1-3
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1

        self.fs.wait_for_daemons(timeout=90)
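
# The failover tests below stop or freeze MDS daemons directly; how quickly the
# monitor reacts is governed by mds_beacon_grace, which the tests read back from
# the monitor to size their wait timeouts.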


class TestFailover(CephFSTestCase):

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Wait until the monitor promotes his replacement
        def promoted():
            active = self.fs.get_active_names()
            return active and active[0] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(promoted, timeout=grace*2)

        # Start the original rank 0 daemon up again, see that he becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            raise SkipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            raise case.SkipTest("fuse_require_active_mds is not set")

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount usually, while the cluster is healthy
        self.mounts[0].mount()
        self.mounts[0].wait_until_mounted()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False
            return True

        self.wait_until_true(laggy, grace * 2)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated by insufficient standbys available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # restart the standby, see that he becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # Set standby_count_wanted back to 0 and verify the warning clears
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)

    def test_discontinuous_mdsmap(self):
        """
        That discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """

        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0)  # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=grace * 2
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0, it will get discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=grace * 2
        )

        # mds.b will be stuck at 'reconnect' state if snapserver gets confused
        # by discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)
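
# The standby-replay tests below drive fs.set_allow_standby_replay() and use the
# _confirm_*() / _check_*() helpers to verify which daemon is following each rank.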


class TestStandbyReplay(CephFSTestCase):

    def _confirm_no_replay(self):
        status = self.fs.status()
        standby_count = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status

    def _confirm_single_replay(self, full=True, status=None):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        self._confirm_single_replay()

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        time.sleep(randint(1, 5))
        self.fs.rank_restart(status=status)
        status = self._check_replay_takeover(status)
        status = self._confirm_single_replay(status=status)

        time.sleep(randint(1, 5))
        self.fs.rank_fail()
        status = self._check_replay_takeover(status)
        status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons()  # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        time.sleep(randint(1, 5))
        victim = randint(0, new_max_mds-1)
        self.fs.rank_restart(rank=victim, status=status)
        status = self._check_replay_takeover(status, rank=victim)
        status = self._confirm_single_replay(status=status, full=False)

        time.sleep(randint(1, 5))
        victim = randint(0, new_max_mds-1)
        self.fs.rank_fail(rank=victim)
        status = self._check_replay_takeover(status, rank=victim)
        status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        time.sleep(randint(1, 5))
        victim = self.fs.get_replay(status=status)
        self.fs.mds_restart(mds_id=victim['name'])
        status = self._confirm_single_replay(status=status)

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1)  # stop rank 1

        status = self._confirm_single_replay()
        self.assertEqual(standby_count, len(list(status.get_standbys())))
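
# The multi-filesystem tests create their own filesystems rather than relying on
# the default one (REQUIRE_FILESYSTEM = False) and first enable the cluster's
# enable_multiple flag in setUp().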


class TestMultiFilesystems(CephFSTestCase):
    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
                                                     "enable_multiple", "true",
                                                     "--yes-i-really-mean-it")

    def _setup_two(self):
        fs_a = self.mds_cluster.newfs("alpha")
        fs_b = self.mds_cluster.newfs("bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount(mount_fs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount(mount_fs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) |
                                                                  set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)