import time
import signal
import logging
import operator
from random import randint, choice

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)
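
# This module covers MDS failover behaviour end to end: file system affinity
# via mds_join_fs (TestClusterAffinity), resizing the active cluster with
# max_mds (TestClusterResize), failover and related health warnings
# (TestFailover), standby-replay daemons (TestStandbyReplay), and failover
# across multiple file systems (TestMultiFilesystems).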


class TestClusterAffinity(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

    def _verify_join_fs(self, target, status=None, fs=None):
        fs_select = fs
        if fs_select is None:
            fs_select = self.fs
        if status is None:
            status = fs_select.wait_for_daemons(timeout=30)
            log.debug("%s", status)
        target = sorted(target, key=operator.itemgetter('name'))
        log.info("target = %s", target)
        current = list(status.get_all())
        current = sorted(current, key=operator.itemgetter('name'))
        log.info("current = %s", current)
        self.assertEqual(len(current), len(target))
        for i in range(len(current)):
            for attr in target[i]:
                self.assertIn(attr, current[i])
                self.assertEqual(target[i][attr], current[i][attr])

    def _change_target_state(self, state, name, changes):
        for entity in state:
            if entity['name'] == name:
                for k, v in changes.items():
                    entity[k] = v
                return
        self.fail("no entity")

    def _verify_init(self, fs=None):
        fs_select = fs
        if fs_select is None:
            fs_select = self.fs
        status = fs_select.status()
        log.info("status = {0}".format(status))
        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
        self._verify_join_fs(target, status=status, fs=fs_select)
        return (status, target)

    def _reach_target(self, target):
        def takeover():
            try:
                self._verify_join_fs(target)
                return True
            except AssertionError as e:
                log.debug("%s", e)
                return False
        self.wait_until_true(takeover, 30)
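
    # The affinity tests share one pattern: _verify_init snapshots the current
    # daemons into a list of expected attribute dicts ("target"),
    # _change_target_state edits the expectation for a single daemon, and
    # _reach_target polls for up to 30 seconds until the live FSMap matches.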

    def test_join_fs_runtime(self):
        """
        That setting mds_join_fs at runtime affects the cluster layout.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_unset(self):
        """
        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        names = (standbys[0]['name'], standbys[1]['name'])
        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
        self._reach_target(target)
        time.sleep(5) # MDSMonitor tick
        status = self.fs.wait_for_daemons()
        active = self.fs.get_active_names(status=status)[0]
        self.assertIn(active, names)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        new_active = (set(names) - set((active,))).pop()
        self._change_target_state(target, new_active, {'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_drop(self):
        """
        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        active = standbys[0]['name']
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        self._reach_target(target)

    def test_join_fs_vanilla(self):
        """
        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
        """
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        status2, _ = self._verify_init(fs=fs2)
        active2 = fs2.get_active_names(status=status2)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        victim = standbys.pop()
        # Set a bogus fs on the others
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        # The active MDS for cephfs2 will be replaced by the MDS for which
        # file system affinity has been set. Also, set the affinity for
        # the earlier active MDS so that it is not chosen by the monitors
        # as an active MDS for the existing file system.
        log.info(f'assigning affinity to cephfs2 for active mds (mds.{active2})')
        self.config_set(f'mds.{active2}', 'mds_join_fs', 'cephfs2')
        self._change_target_state(target, active2, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        self._change_target_state(target, victim, {'state': 'up:active'})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertEqual(active, victim)

    def test_join_fs_last_resort(self):
        """
        That a standby with mds_join_fs set to another fs is still used if necessary.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        for mds in standbys:
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        status = self.fs.status()
        ranks = list(self.fs.get_ranks(status=status))
        self.assertEqual(len(ranks), 1)
        self.assertIn(ranks[0]['name'], standbys)
        # Note that we would expect the former active to reclaim its spot, but
        # we're not testing that here.

    def test_join_fs_steady(self):
        """
        That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id})
        self._reach_target(target)
        self.fs.rank_fail()
        self._reach_target(target)

    def test_join_fs_standby_replay(self):
        """
        That a standby-replay daemon with weak affinity is replaced by a stronger one.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.fs.set_allow_standby_replay(True)
        status = self.fs.status()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
        self._reach_target(target)
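

# TestClusterResize leans on the Filesystem.grow()/shrink() helpers, which
# are assumed here to wrap set_max_mds plus a wait for the rank count to
# catch up.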
class TestClusterResize(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 3

    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?

        self.fs.grow(2)
        self.fs.grow(3)

    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

        self.fs.grow(3)
        self.fs.shrink(1)

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.fs.shrink(2)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.fs.grow(2)
        self.fs.set_down()
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.grow(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.fs.grow(2)

        # Now add a delay which should slow down how quickly rank 1 stops
        self.config_set('mds', 'ms_inject_delay_max', '5.0')
        self.config_set('mds', 'ms_inject_delay_probability', '1.0')
        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        # Don't wait for rank 1 to stop
        self.fs.set_max_mds(3)
        log.info("status = {0}".format(self.fs.status()))

        # Now check that the mons didn't try to promote a standby to rank 2
        self.fs.set_max_mds(2)
        status = self.fs.status()
        try:
            status = self.fs.wait_for_daemons(timeout=90)
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertEqual(ranks, set([0, 1]))
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1

        self.fs.wait_for_daemons(timeout=90)
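

# The monitors only fail over an MDS after it has missed beacons for the
# grace period; the waits below use self.fs.beacon_timeout as that bound.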
class TestFailover(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_repeated_boot(self):
        """
        That multiple boot messages do not result in the MDS getting evicted.
        """

        interval = 10
        self.config_set("mon", "paxos_propose_interval", interval)

        mds = choice(list(self.fs.status().get_all()))

        with self.assert_cluster_log(f"daemon mds.{mds['name']} restarted", present=False):
            # Avoid a beacon to the monitors with down:dne by restarting:
            self.fs.mds_fail(mds_id=mds['name'])
            # `ceph mds fail` won't return until the FSMap is committed, double-check:
            self.assertIsNone(self.fs.status().get_mds_gid(mds['gid']))
            time.sleep(2) # for mds to restart and accept asok commands
            status1 = self.fs.mds_asok(['status'], mds_id=mds['name'])
            time.sleep(interval*1.5)
            status2 = self.fs.mds_asok(['status'], mds_id=mds['name'])
            self.assertEqual(status1['id'], status2['id'])

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        # Wait until the monitor promotes its replacement
        def promoted():
            ranks = list(self.fs.get_ranks())
            return len(ranks) > 0 and ranks[0]['name'] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(promoted, timeout=self.fs.beacon_timeout)

        # Start the original rank 0 daemon up again, see that it becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )
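
    # A daemon that stops sending beacons is first marked with "laggy_since"
    # in the mdsmap before being replaced; test_client_abort below waits for
    # that marker on every MDS to know the cluster is effectively unavailable.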

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            self.skipTest("fuse_require_active_mds is not set")

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount usually, while the cluster is healthy
        self.mounts[0].mount_wait()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, self.fs.beacon_timeout)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount_wait()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated by insufficient standbys available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)

        # restart the standby, see that it becomes a standby, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60 # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)
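
    # The next test freezes rank 0 (rank_freeze plus SIGSTOP) so it misses
    # several map epochs while rank 1 is failed over, then checks that the
    # daemon survives receiving a discontinuous mdsmap on SIGCONT.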

    def test_discontinuous_mdsmap(self):
        """
        That discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0) # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=self.fs.beacon_timeout
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0, it will get a discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=self.fs.beacon_timeout
        )

        # mds.b will be stuck in 'reconnect' state if the snapserver gets
        # confused by the discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)

    def test_connect_bootstrapping(self):
        self.config_set("mds", "mds_sleep_rank_change", 10000000.0)
        self.config_set("mds", "mds_connect_bootstrapping", True)
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()
        self.fs.rank_fail(rank=0)
        # rank 0 will get stuck in up:resolve, see https://tracker.ceph.com/issues/53194
        self.fs.wait_for_daemons()


class TestStandbyReplay(CephFSTestCase):
    MDSS_REQUIRED = 4

    def _confirm_no_replay(self):
        status = self.fs.status()
        _ = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status
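
    # _confirm_single_replay (below) asserts that each active rank has at most
    # one standby-replay follower and, when full=True, that every rank has
    # one, burning a retry (with a short sleep) before giving up on a rank
    # that has no follower yet.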

    def _confirm_single_replay(self, full=True, status=None, retries=3):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                if retries <= 0:
                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
                else:
                    retries = retries-1
                    time.sleep(2)
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()

    def test_standby_replay_damaged(self):
        """
        That a standby-replay daemon can cause the rank to go damaged correctly.
        """

        self._confirm_no_replay()
        self.config_set("mds", "mds_standby_replay_damaged", True)
        self.fs.set_allow_standby_replay(True)
        self.wait_until_true(
            lambda: len(self.fs.get_damaged()) > 0,
            timeout=30
        )
        status = self.fs.status()
        self.assertListEqual([], list(self.fs.get_ranks(status=status)))
        self.assertListEqual([0], self.fs.get_damaged(status=status))

    def test_standby_replay_disable(self):
        """
        That turning off allow_standby_replay fails all standby-replay daemons.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()
        self.fs.set_allow_standby_replay(False)
        self._confirm_no_replay()

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_restart(status=status)
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_fail()
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons() # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_restart(rank=victim, status=status)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_fail(rank=victim)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = self.fs.get_replay(status=status)
            self.fs.mds_restart(mds_id=victim['name'])
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_prepare_beacon(self):
        """
        That MDSMonitor::prepare_beacon handles standby-replay daemons
        correctly without removing the standby. (Note, usually a standby-replay
        beacon will just be replied to by MDSMonitor::preprocess_beacon.)
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()
        replays = list(status.get_replays(self.fs.id))
        self.assertEqual(len(replays), 1)
        self.config_set('mds.'+replays[0]['name'], 'mds_inject_health_dummy', True)
        time.sleep(10) # for something not to happen...
        status = self._confirm_single_replay()
        replays2 = list(status.get_replays(self.fs.id))
        self.assertEqual(replays[0]['gid'], replays2[0]['gid'])

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down.
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1) # stop rank 1

        status = self._confirm_single_replay()
        # assertTrue here would treat the second argument as a failure
        # message and never compare the counts; assertEqual is what's meant
        self.assertEqual(standby_count, len(list(status.get_standbys())))


class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
            "enable_multiple", "true",
            "--yes-i-really-mean-it")

    def _setup_two(self):
        fs_a = self.mds_cluster.newfs(name="alpha")
        fs_b = self.mds_cluster.newfs(name="bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return (fs_a, fs_b)
):
714 fs_a
, fs_b
= self
._setup
_two
()
716 # Mount a client on fs_a
717 self
.mount_a
.mount_wait(cephfs_name
=fs_a
.name
)
718 self
.mount_a
.write_n_mb("pad.bin", 1)
719 self
.mount_a
.write_n_mb("test.bin", 2)
720 a_created_ino
= self
.mount_a
.path_to_ino("test.bin")
721 self
.mount_a
.create_files()
723 # Mount a client on fs_b
724 self
.mount_b
.mount_wait(cephfs_name
=fs_b
.name
)
725 self
.mount_b
.write_n_mb("test.bin", 1)
726 b_created_ino
= self
.mount_b
.path_to_ino("test.bin")
727 self
.mount_b
.create_files()
729 # Check that a non-default filesystem mount survives an MDS
730 # failover (i.e. that map subscription is continuous, not
731 # just the first time), reproduces #16022
732 old_fs_b_mds
= fs_b
.get_active_names()[0]
733 self
.mds_cluster
.mds_stop(old_fs_b_mds
)
734 self
.mds_cluster
.mds_fail(old_fs_b_mds
)
735 fs_b
.wait_for_daemons()
736 background
= self
.mount_b
.write_background()
737 # Raise exception if the write doesn't finish (i.e. if client
738 # has not kept up with MDS failure)
740 self
.wait_until_true(lambda: background
.finished
, timeout
=30)
742 # The mount is stuck, we'll have to force it to fail cleanly
743 background
.stdin
.close()
744 self
.mount_b
.umount_wait(force
=True)
747 self
.mount_a
.umount_wait()
748 self
.mount_b
.umount_wait()
750 # See that the client's files went into the correct pool
751 self
.assertTrue(fs_a
.data_objects_present(a_created_ino
, 1024 * 1024))
752 self
.assertTrue(fs_b
.data_objects_present(b_created_ino
, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        # Usual setup...
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)