# ceph/qa/tasks/cephfs/test_failover.py
import time
import signal
import logging
import operator
from random import randint
from six.moves import range

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)
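
# The tests below drive MDS cluster layout through a few knobs that operators
# normally reach via the CLI; a rough (illustrative, not exhaustive) mapping
# of the helpers used throughout this file:
#
#   self.config_set('mds.a', 'mds_join_fs', 'cephfs')  ->  ceph config set mds.a mds_join_fs cephfs
#   self.fs.set_max_mds(2)                             ->  ceph fs set <fs_name> max_mds 2
#   self.fs.set_allow_standby_replay(True)             ->  ceph fs set <fs_name> allow_standby_replay true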


class TestClusterAffinity(CephFSTestCase):
    def _verify_join_fs(self, target, status=None):
        if status is None:
            status = self.fs.wait_for_daemons(timeout=30)
            log.debug("%s", status)
        target = sorted(target, key=operator.itemgetter('name'))
        log.info("target = %s", target)
        current = list(status.get_all())
        current = sorted(current, key=operator.itemgetter('name'))
        log.info("current = %s", current)
        self.assertEqual(len(current), len(target))
        for i in range(len(current)):
            for attr in target[i]:
                self.assertIn(attr, current[i])
                self.assertEqual(target[i][attr], current[i][attr])

    def _change_target_state(self, state, name, changes):
        for entity in state:
            if entity['name'] == name:
                for k, v in changes.items():
                    entity[k] = v
                return
        self.fail("no entity")

    def _verify_init(self):
        status = self.fs.status()
        log.info("status = {0}".format(status))
        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
        self._verify_join_fs(target, status=status)
        return (status, target)

    def _reach_target(self, target):
        def takeover():
            try:
                self._verify_join_fs(target)
                return True
            except AssertionError as e:
                log.debug("%s", e)
                return False
        status = self.wait_until_true(takeover, 30)
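
    # The monitors re-evaluate standby placement over successive FSMap epochs,
    # so affinity changes are not instantaneous; hence polling for up to 30s
    # above rather than a one-shot check.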

    def test_join_fs_runtime(self):
        """
        That setting mds_join_fs at runtime affects the cluster layout.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_unset(self):
        """
        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        names = (standbys[0]['name'], standbys[1]['name'])
        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertIn(active, names)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        new_active = (set(names) - set((active,))).pop()
        self._change_target_state(target, new_active, {'state': 'up:active'})
        self._reach_target(target)
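
    # config_rm drops the daemon's mds_join_fs, which shows up in the FSMap as
    # join_fscid == -1; the surviving high-affinity standby should then be
    # preferred for the rank.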

    def test_join_fs_drop(self):
        """
        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        active = standbys[0]['name']
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        self._reach_target(target)

    def test_join_fs_vanilla(self):
        """
        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
        """
        self.fs.set_allow_multifs()
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        victim = standbys.pop()
        # Set a bogus fs on the others
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        self._change_target_state(target, victim, {'state': 'up:active'})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertEqual(active, victim)

    def test_join_fs_last_resort(self):
        """
        That a standby with mds_join_fs set to another fs is still used if necessary.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
        self.fs.set_allow_multifs()
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        for mds in standbys:
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        status = self.fs.status()
        ranks = list(self.fs.get_ranks(status=status))
        self.assertEqual(len(ranks), 1)
        self.assertIn(ranks[0]['name'], standbys)
        # Note that we would expect the former active to reclaim its spot, but
        # we're not testing that here.

    def test_join_fs_steady(self):
        """
        That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id})
        self._reach_target(target)
        self.fs.rank_fail()
        self._reach_target(target)

    def test_join_fs_standby_replay(self):
        """
        That a standby-replay daemon with weak affinity is replaced by a stronger one.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.fs.set_allow_standby_replay(True)
        status = self.fs.status()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
        self._reach_target(target)
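

# Cluster size is controlled purely by max_mds (e.g. `ceph fs set <fs_name>
# max_mds 2`): the monitors promote standbys to fill new ranks, or ask ranks
# >= max_mds to stop, until the number of actives matches.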


class TestClusterResize(CephFSTestCase):
    def grow(self, n):
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        fscid = self.fs.id
        status = self.fs.status()
        log.info("status = {0}".format(status))

        original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        _ = set([info['gid'] for info in status.get_standbys()])

        oldmax = self.fs.get_var('max_mds')
        self.assertTrue(n > oldmax)
        self.fs.set_max_mds(n)

        log.info("Waiting for cluster to grow.")
        status = self.fs.wait_for_daemons(timeout=60+grace*2)
        ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        self.assertTrue(original_ranks.issubset(ranks) and len(ranks) == n)
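
    # The daemon-wait timeouts are padded with mds_beacon_grace because the
    # monitors only react to a dead or stalled MDS after it has missed beacons
    # for that long.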

    def shrink(self, n):
        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        fscid = self.fs.id
        status = self.fs.status()
        log.info("status = {0}".format(status))

        original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        _ = set([info['gid'] for info in status.get_standbys()])

        oldmax = self.fs.get_var('max_mds')
        self.assertTrue(n < oldmax)
        self.fs.set_max_mds(n)

        # Wait until the monitor finishes stopping ranks >= n
        log.info("Waiting for cluster to shrink.")
        status = self.fs.wait_for_daemons(timeout=60+grace*2)
        ranks = set([info['gid'] for info in status.get_ranks(fscid)])
        self.assertTrue(ranks.issubset(original_ranks) and len(ranks) == n)

    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?
        self.grow(2)
        self.grow(3)

    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

        self.grow(3)
        self.shrink(1)

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.shrink(2)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.mount_a.umount_wait()

        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.mount_a.umount_wait()

        self.fs.set_max_mds(2)
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.mount_a.umount_wait()

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.mount_a.umount_wait()

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.grow(2)

        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        self.fs.set_max_mds(3)
        # Don't wait for rank 1 to stop

        self.fs.set_max_mds(2)
        # Prevent another MDS from taking rank 1
        # XXX This is a little racy because rank 1 may have stopped and a
        # standby assigned to rank 1 before joinable=0 is set.
        self.fs.set_joinable(False) # XXX keep in mind changing max_mds clears this flag

        try:
            status = self.fs.wait_for_daemons(timeout=90)
            raise RuntimeError("should not be able to successfully shrink cluster!")
        except:
            # could not shrink to max_mds=2 and reach 2 actives (because joinable=False)
            status = self.fs.status()
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertTrue(ranks == set([0]))
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1
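            # Starting from max_mds=2 this cycles through 2, 1, 3, 2, 1, 3, ...
            # since (2+1)%3+1 == 1, (1+1)%3+1 == 3 and (3+1)%3+1 == 2.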

        self.fs.wait_for_daemons(timeout=90)


class TestFailover(CephFSTestCase):
    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Wait until the monitor promotes its replacement
        def promoted():
            active = self.fs.get_active_names()
            return active and active[0] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(
            promoted,
            timeout=grace*2)

        # Start the original rank 0 daemon up again, see that it becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            self.skipTest("fuse_require_active_mds is not set")

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount usually, while the cluster is healthy
        self.mounts[0].mount_wait()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False
            return True

        self.wait_until_true(laggy, grace * 2)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount_wait()
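
        # With fuse_require_active_mds set, ceph-fuse refuses to complete the
        # mount when every MDS in the map looks laggy/unavailable, which
        # surfaces here as a failed mount command.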

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated by insufficient standbys available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # Restart the standby, see that it becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)

    def test_discontinuous_mdsmap(self):
        """
        That discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0) # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=grace * 2
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0, it will get discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=grace * 2
        )

        # mds.b will be stuck at 'reconnect' state if snapserver gets confused
        # by discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)


class TestStandbyReplay(CephFSTestCase):
    def _confirm_no_replay(self):
        status = self.fs.status()
        _ = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status

    def _confirm_single_replay(self, full=True, status=None, retries=3):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                if retries <= 0:
                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
                else:
                    retries = retries-1
                    time.sleep(2)
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        self._confirm_single_replay()
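
    # allow_standby_replay (`ceph fs set <fs_name> allow_standby_replay true`)
    # lets otherwise-idle standbys follow an active rank's journal; the
    # invariant checked throughout is at most one follower per rank.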

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_restart(status=status)
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_fail()
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons() # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_restart(rank=victim, status=status)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_fail(rank=victim)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = self.fs.get_replay(status=status)
            self.fs.mds_restart(mds_id=victim['name'])
            status = self._confirm_single_replay(status=status)

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1) # stop rank 1

        status = self._confirm_single_replay()
        self.assertEqual(standby_count, len(list(status.get_standbys())))


class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
            "enable_multiple", "true",
            "--yes-i-really-mean-it")

    def _setup_two(self):
        fs_a = self.mds_cluster.newfs(name="alpha")
        fs_b = self.mds_cluster.newfs(name="bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount(mount_fs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount(mount_fs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)