import time
import signal
import logging
import operator
from random import randint, choice

from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from tasks.cephfs.fuse_mount import FuseMount

log = logging.getLogger(__name__)

class TestClusterAffinity(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

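    # Compare the expected per-daemon attributes in the 'target' list against the
    # live FSMap status: the daemon counts must match and every attribute listed
    # for a daemon must be present and equal.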
    def _verify_join_fs(self, target, status=None, fs=None):
        fs_select = fs
        if fs_select is None:
            fs_select = self.fs
        if status is None:
            status = fs_select.wait_for_daemons(timeout=30)
            log.debug("%s", status)
        target = sorted(target, key=operator.itemgetter('name'))
        log.info("target = %s", target)
        current = list(status.get_all())
        current = sorted(current, key=operator.itemgetter('name'))
        log.info("current = %s", current)
        self.assertEqual(len(current), len(target))
        for i in range(len(current)):
            for attr in target[i]:
                self.assertIn(attr, current[i])
                self.assertEqual(target[i][attr], current[i][attr])

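    # Update the expected attributes for the named daemon in the target list;
    # fail the test if no entry with that name exists.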
    def _change_target_state(self, state, name, changes):
        for entity in state:
            if entity['name'] == name:
                for k, v in changes.items():
                    entity[k] = v
                return
        self.fail("no entity")

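    # Capture the current status and build the initial expected target:
    # every daemon starts with no file system affinity (join_fscid == -1).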
    def _verify_init(self, fs=None):
        fs_select = fs
        if fs_select is None:
            fs_select = self.fs
        status = fs_select.status()
        log.info("status = {0}".format(status))
        target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()]
        self._verify_join_fs(target, status=status, fs=fs_select)
        return (status, target)

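    # Poll until the cluster layout matches the expected target, retrying on
    # assertion failures for up to 30 seconds.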
    def _reach_target(self, target):
        def takeover():
            try:
                self._verify_join_fs(target)
                return True
            except AssertionError as e:
                log.debug("%s", e)
                return False
        self.wait_until_true(takeover, 30)

    def test_join_fs_runtime(self):
        """
        That setting mds_join_fs at runtime affects the cluster layout.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        self.config_set('mds.'+standbys[0]['name'], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0]['name'], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_unset(self):
        """
        That unsetting mds_join_fs will cause failover if another high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        names = (standbys[0]['name'], standbys[1]['name'])
        self.config_set('mds.'+names[0], 'mds_join_fs', 'cephfs')
        self.config_set('mds.'+names[1], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, names[0], {'join_fscid': self.fs.id})
        self._change_target_state(target, names[1], {'join_fscid': self.fs.id})
        self._reach_target(target)
        time.sleep(5) # MDSMonitor tick
        status = self.fs.wait_for_daemons()
        active = self.fs.get_active_names(status=status)[0]
        self.assertIn(active, names)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        new_active = (set(names) - set((active,))).pop()
        self._change_target_state(target, new_active, {'state': 'up:active'})
        self._reach_target(target)

    def test_join_fs_drop(self):
        """
        That unsetting mds_join_fs will not cause failover if no high-affinity standby exists.
        """
        status, target = self._verify_init()
        standbys = list(status.get_standbys())
        active = standbys[0]['name']
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.config_rm('mds.'+active, 'mds_join_fs')
        self._change_target_state(target, active, {'join_fscid': -1})
        self._reach_target(target)

    def test_join_fs_vanilla(self):
        """
        That a vanilla standby is preferred over others with mds_join_fs set to another fs.
        """
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        status2, _ = self._verify_init(fs=fs2)
        active2 = fs2.get_active_names(status=status2)[0]
        standbys = [info['name'] for info in status.get_standbys()]
        victim = standbys.pop()
        # Set a bogus fs on the others
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        # The active MDS for cephfs2 will be replaced by the MDS for which
        # file system affinity has been set. Also, set the affinity for
        # the earlier active MDS so that it is not chosen by the monitors
        # as an active MDS for the existing file system.
        log.info(f'assigning affinity to cephfs2 for active mds (mds.{active2})')
        self.config_set(f'mds.{active2}', 'mds_join_fs', 'cephfs2')
        self._change_target_state(target, active2, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        self._change_target_state(target, victim, {'state': 'up:active'})
        self._reach_target(target)
        status = self.fs.status()
        active = self.fs.get_active_names(status=status)[0]
        self.assertEqual(active, victim)

    def test_join_fs_last_resort(self):
        """
        That a standby with mds_join_fs set to another fs is still used if necessary.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        for mds in standbys:
            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
        fs2 = self.mds_cluster.newfs(name="cephfs2")
        for mds in standbys:
            self._change_target_state(target, mds, {'join_fscid': fs2.id})
        self.fs.rank_fail()
        status = self.fs.status()
        ranks = list(self.fs.get_ranks(status=status))
        self.assertEqual(len(ranks), 1)
        self.assertIn(ranks[0]['name'], standbys)
        # Note that we would expect the former active to reclaim its spot, but
        # we're not testing that here.

    def test_join_fs_steady(self):
        """
        That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
        """
        status, target = self._verify_init()
        active = self.fs.get_active_names(status=status)[0]
        self.config_set('mds.'+active, 'mds_join_fs', 'cephfs')
        self._change_target_state(target, active, {'join_fscid': self.fs.id})
        self._reach_target(target)
        self.fs.rank_fail()
        self._reach_target(target)

    def test_join_fs_standby_replay(self):
        """
        That a standby-replay daemon with weak affinity is replaced by a stronger one.
        """
        status, target = self._verify_init()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:active'})
        self._reach_target(target)
        self.fs.set_allow_standby_replay(True)
        status = self.fs.status()
        standbys = [info['name'] for info in status.get_standbys()]
        self.config_set('mds.'+standbys[0], 'mds_join_fs', 'cephfs')
        self._change_target_state(target, standbys[0], {'join_fscid': self.fs.id, 'state': 'up:standby-replay'})
        self._reach_target(target)

class TestClusterResize(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 3

    def test_grow(self):
        """
        That the MDS cluster grows after increasing max_mds.
        """

        # Need all my standbys up as well as the active daemons
        # self.wait_for_daemon_start() necessary?

        self.fs.grow(2)
        self.fs.grow(3)


    def test_shrink(self):
        """
        That the MDS cluster shrinks automatically after decreasing max_mds.
        """

        self.fs.grow(3)
        self.fs.shrink(1)

    def test_up_less_than_max(self):
        """
        That a health warning is generated when max_mds is greater than active count.
        """

        status = self.fs.status()
        mdss = [info['gid'] for info in status.get_all()]
        self.fs.set_max_mds(len(mdss)+1)
        self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30)
        self.fs.shrink(2)
        self.wait_for_health_clear(30)

    def test_down_health(self):
        """
        That marking a FS down does not generate a health warning
        """

        self.fs.set_down()
        try:
            self.wait_for_health("", 30)
            raise RuntimeError("got health warning?")
        except RuntimeError as e:
            if "Timed out after" in str(e):
                pass
            else:
                raise

    def test_down_twice(self):
        """
        That marking a FS down twice does not wipe old_max_mds.
        """

        self.fs.grow(2)
        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 2)
        self.fs.wait_for_daemons(timeout=60)

    def test_down_grow(self):
        """
        That setting max_mds undoes down.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.fs.grow(2)
        self.fs.wait_for_daemons()

    def test_down(self):
        """
        That down setting toggles and sets max_mds appropriately.
        """

        self.fs.set_down()
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 0)
        self.fs.set_down(False)
        self.assertEqual(self.fs.get_var("max_mds"), 1)
        self.fs.wait_for_daemons()
        self.assertEqual(self.fs.get_var("max_mds"), 1)

    def test_hole(self):
        """
        Test that a hole cannot be created in the FS ranks.
        """

        fscid = self.fs.id

        self.fs.grow(2)

        # Now add a delay which should slow down how quickly rank 1 stops
        self.config_set('mds', 'ms_inject_delay_max', '5.0')
        self.config_set('mds', 'ms_inject_delay_probability', '1.0')
        self.fs.set_max_mds(1)
        log.info("status = {0}".format(self.fs.status()))

        # Don't wait for rank 1 to stop
        self.fs.set_max_mds(3)
        log.info("status = {0}".format(self.fs.status()))

        # Now check that the mons didn't try to promote a standby to rank 2
        self.fs.set_max_mds(2)
        status = self.fs.status()
        try:
            status = self.fs.wait_for_daemons(timeout=90)
            ranks = set([info['rank'] for info in status.get_ranks(fscid)])
            self.assertEqual(ranks, set([0, 1]))
        finally:
            log.info("status = {0}".format(status))

    def test_thrash(self):
        """
        Test that thrashing max_mds does not fail.
        """

        max_mds = 2
        for i in range(0, 100):
            self.fs.set_max_mds(max_mds)
            max_mds = (max_mds+1)%3+1

        self.fs.wait_for_daemons(timeout=90)

class TestFailover(CephFSTestCase):
    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 2

    def test_repeated_boot(self):
        """
        That multiple boot messages do not result in the MDS getting evicted.
        """

        interval = 10
        self.config_set("mon", "paxos_propose_interval", interval)

        mds = choice(list(self.fs.status().get_all()))

        with self.assert_cluster_log(f"daemon mds.{mds['name']} restarted", present=False):
            # Avoid a beacon to the monitors with down:dne by restarting:
            self.fs.mds_fail(mds_id=mds['name'])
            # `ceph mds fail` won't return until the FSMap is committed, double-check:
            self.assertIsNone(self.fs.status().get_mds_gid(mds['gid']))
            time.sleep(2) # for mds to restart and accept asok commands
            status1 = self.fs.mds_asok(['status'], mds_id=mds['name'])
            time.sleep(interval*1.5)
            status2 = self.fs.mds_asok(['status'], mds_id=mds['name'])
            self.assertEqual(status1['id'], status2['id'])

    def test_simple(self):
        """
        That when the active MDS is killed, a standby MDS is promoted into
        its rank after the grace period.

        This is just a simple unit test, the harder cases are covered
        in thrashing tests.
        """

        (original_active, ) = self.fs.get_active_names()
        original_standbys = self.mds_cluster.get_standby_daemons()

        # Kill the rank 0 daemon's physical process
        self.fs.mds_stop(original_active)

        # Wait until the monitor promotes its replacement
        def promoted():
            ranks = list(self.fs.get_ranks())
            return len(ranks) > 0 and ranks[0]['name'] in original_standbys

        log.info("Waiting for promotion of one of the original standbys {0}".format(
            original_standbys))
        self.wait_until_true(promoted, timeout=self.fs.beacon_timeout)

        # Start the original rank 0 daemon up again, see that it becomes a standby
        self.fs.mds_restart(original_active)
        self.wait_until_true(
            lambda: original_active in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )

    def test_client_abort(self):
        """
        That a client will respect fuse_require_active_mds and error out
        when the cluster appears to be unavailable.
        """

        if not isinstance(self.mount_a, FuseMount):
            self.skipTest("Requires FUSE client to inject client metadata")

        require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
        if not require_active:
            self.skipTest("fuse_require_active_mds is not set")

        # Check it's not laggy to begin with
        (original_active, ) = self.fs.get_active_names()
        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))

        self.mounts[0].umount_wait()

        # Control: that we can mount and unmount as usual while the cluster is healthy
        self.mounts[0].mount_wait()
        self.mounts[0].umount_wait()

        # Stop the daemon processes
        self.fs.mds_stop()

        # Wait for everyone to go laggy
        def laggy():
            mdsmap = self.fs.get_mds_map()
            for info in mdsmap['info'].values():
                if "laggy_since" not in info:
                    return False

            return True

        self.wait_until_true(laggy, self.fs.beacon_timeout)
        with self.assertRaises(CommandFailedError):
            self.mounts[0].mount_wait()

    def test_standby_count_wanted(self):
        """
        That cluster health warnings are generated when insufficient standbys are available.
        """

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()

        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))

        # Kill a standby and check for warning
        victim = standbys.pop()
        self.fs.mds_stop(victim)
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)

        # Restart the victim, see that it becomes a standby again, and check that health clears
        self.fs.mds_restart(victim)
        self.wait_until_true(
            lambda: victim in self.mds_cluster.get_standby_daemons(),
            timeout=60  # Approximately long enough for MDS to start and mon to notice
        )
        self.wait_for_health_clear(timeout=30)

        # Set it one greater than standbys ever seen
        standbys = self.mds_cluster.get_standby_daemons()
        self.assertGreaterEqual(len(standbys), 1)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
        self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)

        # Set it to 0
        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
        self.wait_for_health_clear(timeout=30)

    def test_discontinuous_mdsmap(self):
        """
        That a discontinuous mdsmap does not affect failover.
        See http://tracker.ceph.com/issues/24856.
        """
        self.fs.set_max_mds(2)
        status = self.fs.wait_for_daemons()

        self.mount_a.umount_wait()

        monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds"))

        mds_0 = self.fs.get_rank(rank=0, status=status)
        self.fs.rank_freeze(True, rank=0)  # prevent failover
        self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status)
        self.wait_until_true(
            lambda: "laggy_since" in self.fs.get_rank(),
            timeout=self.fs.beacon_timeout
        )

        self.fs.rank_fail(rank=1)
        self.fs.wait_for_state('up:resolve', rank=1, timeout=30)

        # Make sure mds_0's monitor connection gets reset
        time.sleep(monc_timeout * 2)

        # Continue rank 0; it will get a discontinuous mdsmap
        self.fs.rank_signal(signal.SIGCONT, rank=0)
        self.wait_until_true(
            lambda: "laggy_since" not in self.fs.get_rank(rank=0),
            timeout=self.fs.beacon_timeout
        )

        # mds.b will be stuck at 'reconnect' state if the snapserver gets confused
        # by the discontinuous mdsmap
        self.fs.wait_for_state('up:active', rank=1, timeout=30)
        self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid'])
        self.fs.rank_freeze(False, rank=0)

    def test_connect_bootstrapping(self):
        self.config_set("mds", "mds_sleep_rank_change", 10000000.0)
        self.config_set("mds", "mds_connect_bootstrapping", True)
        self.fs.set_max_mds(2)
        self.fs.wait_for_daemons()
        self.fs.rank_fail(rank=0)
        # rank 0 will get stuck in up:resolve, see https://tracker.ceph.com/issues/53194
        self.fs.wait_for_daemons()


class TestStandbyReplay(CephFSTestCase):
    CLIENTS_REQUIRED = 0
    MDSS_REQUIRED = 4

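    # Assert that the file system currently has no standby-replay daemons and
    # return the sampled status.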
    def _confirm_no_replay(self):
        status = self.fs.status()
        _ = len(list(status.get_standbys()))
        self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
        return status

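    # Check that no rank has more than one standby-replay follower. With
    # full=True, a rank without a follower consumes the retry budget (with a
    # short sleep) before an error is raised.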
    def _confirm_single_replay(self, full=True, status=None, retries=3):
        status = self.fs.wait_for_daemons(status=status)
        ranks = sorted(self.fs.get_mds_map(status=status)['in'])
        replays = list(self.fs.get_replays(status=status))
        checked_replays = set()
        for rank in ranks:
            has_replay = False
            for replay in replays:
                if replay['rank'] == rank:
                    self.assertFalse(has_replay)
                    has_replay = True
                    checked_replays.add(replay['gid'])
            if full and not has_replay:
                if retries <= 0:
                    raise RuntimeError("rank "+str(rank)+" has no standby-replay follower")
                else:
                    retries = retries-1
                    time.sleep(2)
        self.assertEqual(checked_replays, set(info['gid'] for info in replays))
        return status

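    # After a rank restart or failure, verify the takeover: if the rank had a
    # standby-replay daemon, it must be the new active; otherwise the new active
    # must come from the old standby pool or be a daemon not present in the old
    # status (i.e. one that was restarted).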
    def _check_replay_takeover(self, status, rank=0):
        replay = self.fs.get_replay(rank=rank, status=status)
        new_status = self.fs.wait_for_daemons()
        new_active = self.fs.get_rank(rank=rank, status=new_status)
        if replay:
            self.assertEqual(replay['gid'], new_active['gid'])
        else:
            # double check takeover came from a standby (or some new daemon via restart)
            found = False
            for info in status.get_standbys():
                if info['gid'] == new_active['gid']:
                    found = True
                    break
            if not found:
                for info in status.get_all():
                    self.assertNotEqual(info['gid'], new_active['gid'])
        return new_status

    def test_standby_replay_singleton(self):
        """
        That only one MDS becomes standby-replay.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()

    def test_standby_replay_damaged(self):
        """
        That a standby-replay daemon can cause the rank to go damaged correctly.
        """

        self._confirm_no_replay()
        self.config_set("mds", "mds_standby_replay_damaged", True)
        self.fs.set_allow_standby_replay(True)
        self.wait_until_true(
            lambda: len(self.fs.get_damaged()) > 0,
            timeout=30
        )
        status = self.fs.status()
        self.assertListEqual([], list(self.fs.get_ranks(status=status)))
        self.assertListEqual([0], self.fs.get_damaged(status=status))

    def test_standby_replay_disable(self):
        """
        That turning off allow_standby_replay fails all standby-replay daemons.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        time.sleep(30)
        self._confirm_single_replay()
        self.fs.set_allow_standby_replay(False)
        self._confirm_no_replay()

    def test_standby_replay_singleton_fail(self):
        """
        That failures don't violate the singleton constraint.
        """

        self._confirm_no_replay()
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_restart(status=status)
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

        for i in range(10):
            time.sleep(randint(1, 5))
            self.fs.rank_fail()
            status = self._check_replay_takeover(status)
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_singleton_fail_multimds(self):
        """
        That failures don't violate the singleton constraint with multiple actives.
        """

        status = self._confirm_no_replay()
        new_max_mds = randint(2, len(list(status.get_standbys())))
        self.fs.set_max_mds(new_max_mds)
        self.fs.wait_for_daemons() # wait for actives to come online!
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay(full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_restart(rank=victim, status=status)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = randint(0, new_max_mds-1)
            self.fs.rank_fail(rank=victim)
            status = self._check_replay_takeover(status, rank=victim)
            status = self._confirm_single_replay(status=status, full=False)

    def test_standby_replay_failure(self):
        """
        That the failure of a standby-replay daemon happens cleanly
        and doesn't interrupt anything else.
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        for i in range(10):
            time.sleep(randint(1, 5))
            victim = self.fs.get_replay(status=status)
            self.fs.mds_restart(mds_id=victim['name'])
            status = self._confirm_single_replay(status=status)

    def test_standby_replay_prepare_beacon(self):
        """
        That MDSMonitor::prepare_beacon handles standby-replay daemons
        correctly without removing the standby. (Note, usually a standby-replay
        beacon will just be replied to by MDSMonitor::preprocess_beacon.)
        """

        status = self._confirm_no_replay()
        self.fs.set_max_mds(1)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()
        replays = list(status.get_replays(self.fs.id))
        self.assertEqual(len(replays), 1)
        self.config_set('mds.'+replays[0]['name'], 'mds_inject_health_dummy', True)
        time.sleep(10) # for something not to happen...
        status = self._confirm_single_replay()
        replays2 = list(status.get_replays(self.fs.id))
        self.assertEqual(replays[0]['gid'], replays2[0]['gid'])

    def test_rank_stopped(self):
        """
        That when a rank is STOPPED, standby replays for
        that rank get torn down.
        """

        status = self._confirm_no_replay()
        standby_count = len(list(status.get_standbys()))
        self.fs.set_max_mds(2)
        self.fs.set_allow_standby_replay(True)
        status = self._confirm_single_replay()

        self.fs.set_max_mds(1) # stop rank 1

        status = self._confirm_single_replay()
        self.assertEqual(standby_count, len(list(status.get_standbys())))


class TestMultiFilesystems(CephFSTestCase):
    CLIENTS_REQUIRED = 2
    MDSS_REQUIRED = 4

    # We'll create our own filesystems and start our own daemons
    REQUIRE_FILESYSTEM = False

    def setUp(self):
        super(TestMultiFilesystems, self).setUp()
        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
                                                     "enable_multiple", "true",
                                                     "--yes-i-really-mean-it")

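    # Create two file systems ("alpha" and "bravo"), restart the MDS daemons so
    # both get populated, wait for them to become healthy, and widen the client
    # auth caps to cover both data pools.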
    def _setup_two(self):
        fs_a = self.mds_cluster.newfs(name="alpha")
        fs_b = self.mds_cluster.newfs(name="bravo")

        self.mds_cluster.mds_restart()

        # Wait for both filesystems to go healthy
        fs_a.wait_for_daemons()
        fs_b.wait_for_daemons()

        # Reconfigure client auth caps
        for mount in self.mounts:
            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
                'auth', 'caps', "client.{0}".format(mount.client_id),
                'mds', 'allow',
                'mon', 'allow r',
                'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
                    fs_a.get_data_pool_name(), fs_b.get_data_pool_name()))

        return fs_a, fs_b

    def test_clients(self):
        fs_a, fs_b = self._setup_two()

        # Mount a client on fs_a
        self.mount_a.mount_wait(cephfs_name=fs_a.name)
        self.mount_a.write_n_mb("pad.bin", 1)
        self.mount_a.write_n_mb("test.bin", 2)
        a_created_ino = self.mount_a.path_to_ino("test.bin")
        self.mount_a.create_files()

        # Mount a client on fs_b
        self.mount_b.mount_wait(cephfs_name=fs_b.name)
        self.mount_b.write_n_mb("test.bin", 1)
        b_created_ino = self.mount_b.path_to_ino("test.bin")
        self.mount_b.create_files()

        # Check that a non-default filesystem mount survives an MDS
        # failover (i.e. that map subscription is continuous, not
        # just the first time), reproduces #16022
        old_fs_b_mds = fs_b.get_active_names()[0]
        self.mds_cluster.mds_stop(old_fs_b_mds)
        self.mds_cluster.mds_fail(old_fs_b_mds)
        fs_b.wait_for_daemons()
        background = self.mount_b.write_background()
        # Raise exception if the write doesn't finish (i.e. if client
        # has not kept up with MDS failure)
        try:
            self.wait_until_true(lambda: background.finished, timeout=30)
        except RuntimeError:
            # The mount is stuck, we'll have to force it to fail cleanly
            background.stdin.close()
            self.mount_b.umount_wait(force=True)
            raise

        self.mount_a.umount_wait()
        self.mount_b.umount_wait()

        # See that the client's files went into the correct pool
        self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024))
        self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024))

    def test_standby(self):
        fs_a, fs_b = self._setup_two()

        # Assert that the remaining two MDS daemons are now standbys
        a_daemons = fs_a.get_active_names()
        b_daemons = fs_b.get_active_names()
        self.assertEqual(len(a_daemons), 1)
        self.assertEqual(len(b_daemons), 1)
        original_a = a_daemons[0]
        original_b = b_daemons[0]
        expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons))

        # Need all my standbys up as well as the active daemons
        self.wait_for_daemon_start()
        self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons())

        # Kill fs_a's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_a)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_a
        self.assertNotEqual(fs_a.get_active_names()[0], original_a)

        # Kill fs_b's active MDS, see a standby take over
        self.mds_cluster.mds_stop(original_b)
        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 1)
        # Assert that it's a *different* daemon that has now appeared in the map for fs_b
        self.assertNotEqual(fs_b.get_active_names()[0], original_b)

        # Both of the original active daemons should be gone, and all standbys used up
        self.assertEqual(self.mds_cluster.get_standby_daemons(), set())

        # Restart the ones I killed, see them reappear as standbys
        self.mds_cluster.mds_restart(original_a)
        self.mds_cluster.mds_restart(original_b)
        self.wait_until_true(
            lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(),
            timeout=30
        )

    def test_grow_shrink(self):
        # Usual setup...
        fs_a, fs_b = self._setup_two()

        # Increase max_mds on fs_b, see a standby take up the role
        fs_b.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Increase max_mds on fs_a, see a standby take up the role
        fs_a.set_max_mds(2)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Shrink fs_b back to 1, see a daemon go back to standby
        fs_b.set_max_mds(1)
        self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                              reject_fn=lambda v: v > 2 or v < 1)

        # Grow fs_a up to 3, see the former fs_b daemon join it.
        fs_a.set_max_mds(3)
        self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                              reject_fn=lambda v: v > 3 or v < 2)