import logging
from os.path import join as os_path_join

from tasks.cephfs.cephfs_test_case import CephFSTestCase

log = logging.getLogger(__name__)

class TestFSRecovery(CephFSTestCase):
    """
    Tests for recovering FS after loss of FSMap
    """

    CLIENTS_REQUIRED = 1
    MDSS_REQUIRED = 3

    def test_recover_fs_after_fsmap_removal(self):
        data_pool = self.fs.get_data_pool_name()
        metadata_pool = self.fs.get_metadata_pool_name()
        # write data in mount, and fsync
        self.mount_a.create_n_files('file_on_fs', 1, sync=True)
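        # sync=True flushes the new file out to the pools, so the data
        # survives the MDS failure and file system removal below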
        # fail MDSs to allow removing the file system in the next step
        self.fs.fail()
        # Remove file system to lose FSMap and keep the pools intact.
        # This mimics the scenario where the monitor store is rebuilt
        # using OSDs to recover a cluster with a corrupt monitor store.
        # The FSMap is permanently lost, but the FS pools are
        # recovered/intact.
        self.fs.rm()
        # Recreate file system with the pools and the previous fscid
        self.fs.mon_manager.raw_cluster_cmd(
            'fs', 'new', self.fs.name, metadata_pool, data_pool,
            '--recover', '--force', '--fscid', f'{self.fs.id}')
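        # '--recover' reuses the existing in-RADOS metadata instead of
        # initialising a fresh file system and keeps it from being picked up
        # by an MDS until it is made joinable; '--force' permits reusing
        # pools that already hold CephFS data, and '--fscid' restores the
        # original file system ID. Now allow MDS daemons to join again.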
        self.fs.set_joinable()
        # Check status of file system
        self.fs.wait_for_daemons()
        # check that data in the file system is intact
        filepath = os_path_join(self.mount_a.hostfs_mntpt, 'file_on_fs_0')
        self.assertEqual(self.mount_a.read_file(filepath), "0")