# ceph/qa/tasks/cephfs/test_failover.py (ceph 16.2.7)
import time
import signal
import logging
import operator
from random import randint

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)

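# TestClusterAffinity exercises MDS file system affinity: setting the
# mds_join_fs option on an MDS tells the monitors to prefer that daemon when
# choosing an active or standby-replay MDS for the named file system. The
# tests drive it through the central config store; roughly the equivalent
# CLI (shown here only for orientation) would be:
#   ceph config set mds.<name> mds_join_fs cephfs
#   ceph config rm mds.<name> mds_join_fs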
class TestClusterAffinity(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

    def _verify_join_fs(self, target, status=None):
        if status is None:
            status = self.fs.wait_for_daemons(timeout=30)
            log.debug("%s", status)
        target = sorted(target, key=operator.itemgetter('name'))
        log.info("target = %s", target)
        current = list(status.get_all())
        current = sorted(current, key=operator.itemgetter('name'))
        log.info("current = %s", current)
        self.assertEqual(len(current), len(target))
        for i in range(len(current)):
            for attr in target[i]:
                self.assertIn(attr, current[i])
                self.assertEqual(target[i][attr], current[i][attr])

    def _change_target_state(self, state, name, changes):
        for entity in state:
            if entity['name'] == name:
                for k, v in changes.items():
                    entity[k] = v
                return
        self.fail("no entity")

    def _verify_init(self):
        status = self.fs.status()
        log.info("status = {0}".format(status))
        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
        self._verify_join_fs(target, status=status)
        return (status, target)

    def _reach_target(self, target):
        def takeover():
            try:
                self._verify_join_fs(target)
                return True
            except AssertionError as e:
                log.debug("%s", e)
                return False
        self.wait_until_true(takeover, 30)

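    # Pattern used by the tests below: _verify_init() snapshots the current
    # MDS map as a list of per-daemon attribute dicts ("target"), each test
    # mutates that target with _change_target_state() to describe the layout
    # it expects (join_fscid, state, ...), and _reach_target() polls until
    # the live map matches.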
    def test_join_fs_runtime(self):
        """
        That setting mds_join_fs at runtime affects the cluster layout.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_unset(self):
        """
        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        names = (standbys[0]['name'], standbys[1]['name'])
        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertIn(active, names)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        new_active = (set(names) - set((active,))).pop()
        self._change_target_state(target, new_active, {'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_drop(self):
        """
        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        active = standbys[0]['name']
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        self._reach_target(target)

    def test_join_fs_vanilla(self):
        """
        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
        """
        # After Octopus is EOL, we can remove this setting:
        self.fs.set_allow_multifs()
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        victim = standbys.pop()
        # Set a bogus fs on the others
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        self._change_target_state(target, victim, {'state': 'up:active'})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertEqual(active, victim)

    def test_join_fs_last_resort(self):
        """
        That a standby with mds_join_fs set to another fs is still used if necessary.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
        # After Octopus is EOL, we can remove this setting:
        self.fs.set_allow_multifs()
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        for mds in standbys:
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        status = self.fs.status()
        ranks = list(self.fs.get_ranks(status=status))
        self.assertEqual(len(ranks), 1)
        self.assertIn(ranks[0]['name'], standbys)
        # Note that we would expect the former active to reclaim its spot, but
        # we're not testing that here.

    def test_join_fs_steady(self):
        """
        That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id})
        self._reach_target(target)
        self.fs.rank_fail()
        self._reach_target(target)

    def test_join_fs_standby_replay(self):
        """
        That a standby-replay daemon with weak affinity is replaced by a stronger one.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.fs.set_allow_standby_replay(True)
        status = self.fs.status()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
        self._reach_target(target)

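# TestClusterResize covers growing and shrinking the active MDS cluster.
# max_mds sets the number of active ranks for a file system; the grow() and
# shrink() helpers bump it and wait for the daemons to settle into the new
# layout. The rough CLI equivalent (for orientation only) is:
#   ceph fs set <fs_name> max_mds <n>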
class TestClusterResize(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 3

    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?

        self.fs.grow(2)
        self.fs.grow(3)


    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

        self.fs.grow(3)
        self.fs.shrink(1)

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.fs.shrink(2)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.fs.grow(2)
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.grow(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.fs.grow(2)

        # Now add a delay which should slow down how quickly rank 1 stops
        self.config_set('mds', 'ms_inject_delay_max', '5.0')
        self.config_set('mds', 'ms_inject_delay_probability', '1.0')
        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        # Don't wait for rank 1 to stop
        self.fs.set_max_mds(3)
        log.info("status = {0}".format(self.fs.status()))

        # Now check that the mons didn't try to promote a standby to rank 2
        self.fs.set_max_mds(2)
        status = self.fs.status()
        try:
            status = self.fs.wait_for_daemons(timeout=90)
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertEqual(ranks, set([0, 1]))
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1

        self.fs.wait_for_daemons(timeout=90)

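# TestFailover checks that standbys are promoted when an active MDS dies.
# The monitors mark an MDS laggy/failed once it has missed beacons for
# mds_beacon_grace seconds, which is why the tests below wait roughly
# grace*2 before expecting a replacement to appear.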
class TestFailover(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Wait until the monitor promotes its replacement
        def promoted():
            active = self.fs.get_active_names()
            return active and active[0] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(
            promoted,
            timeout=grace*2)

        # Start the original rank 0 daemon up again, see that it becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            self.skipTest("fuse_require_active_mds is not set")

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: check that we can mount and unmount normally while the cluster is healthy
        self.mounts[0].mount_wait()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, grace * 2)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount_wait()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated by insufficient standbys available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # Restart the victim, see that it becomes a standby again, check health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        log.info("waiting for insufficient standby daemon warning")
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)

    def test_discontinuous_mdsmap(self):
        """
        That discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0)  # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=grace * 2
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0, it will get a discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=grace * 2
        )

        # mds.b will be stuck at 'reconnect' state if snapserver gets confused
        # by discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)

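# TestStandbyReplay covers the standby-replay mode: when a file system has
# allow_standby_replay set, a standby daemon follows the journal of an
# active rank so it can take over quickly. The rough CLI equivalent (for
# orientation only) is:
#   ceph fs set <fs_name> allow_standby_replay true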
class TestStandbyReplay(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

    def _confirm_no_replay(self):
        status = self.fs.status()
        _ = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status

    def _confirm_single_replay(self, full=True, status=None, retries=3):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                if retries <= 0:
                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
                else:
                    retries = retries-1
                    time.sleep(2)
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

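    # _check_replay_takeover asserts that, after a rank is restarted or
    # failed, the daemon now holding the rank is the one that was previously
    # its standby-replay follower; if there was no follower, it must at least
    # be a former standby or a newly started daemon rather than some other
    # existing member of the old map.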
    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()

    def test_standby_replay_damaged(self):
        """
        That a standby-replay daemon can cause the rank to go damaged correctly.
        """

        self._confirm_no_replay()
        self.config_set("mds", "mds_standby_replay_damaged", True)
        self.fs.set_allow_standby_replay(True)
        self.wait_until_true(
            lambda: len(self.fs.get_damaged()) > 0,
            timeout=30
        )
        status = self.fs.status()
        self.assertListEqual([], list(self.fs.get_ranks(status=status)))
        self.assertListEqual([0], self.fs.get_damaged(status=status))

    def test_standby_replay_disable(self):
        """
        That turning off allow_standby_replay fails all standby-replay daemons.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()
        self.fs.set_allow_standby_replay(False)
        self._confirm_no_replay()

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_restart(status=status)
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_fail()
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons()  # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_restart(rank=victim, status=status)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_fail(rank=victim)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = self.fs.get_replay(status=status)
            self.fs.mds_restart(mds_id=victim['name'])
            status = self._confirm_single_replay(status=status)

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1)  # stop rank 1

        status = self._confirm_single_replay()
        # The standby pool should be back to its original size once rank 1's
        # standby-replay follower has been torn down.
        self.assertEqual(standby_count, len(list(status.get_standbys())))


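# TestMultiFilesystems runs two file systems side by side. Multiple file
# systems must be explicitly enabled first, which setUp() does via
# "fs flag set enable_multiple true --yes-i-really-mean-it".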
class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
            "enable_multiple", "true",
            "--yes-i-really-mean-it")

    def _setup_two(self):
        fs_a = self.mds_cluster.newfs(name="alpha")
        fs_b = self.mds_cluster.newfs(name="bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount_wait(cephfs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount_wait(cephfs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        # Usual setup...
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)