git.proxmox.com Git - ceph.git/blobdiff - ceph/src/pybind/mgr/cephadm/tests/test_migration.py
import quincy beta 17.1.0
[ceph.git] / ceph / src / pybind / mgr / cephadm / tests / test_migration.py
index 168e44e749c2789b2c0819c8c3ad59312c97c48c..1c73897cb852734e3080ac31dcd1540b99c98d73 100644 (file)
@@ -4,7 +4,8 @@ from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlaceme
 from ceph.utils import datetime_to_str, datetime_now
 from cephadm import CephadmOrchestrator
 from cephadm.inventory import SPEC_STORE_PREFIX
-from cephadm.tests.fixtures import _run_cephadm, wait, with_host
+from cephadm.migrations import LAST_MIGRATION
+from cephadm.tests.fixtures import _run_cephadm, wait, with_host, receive_agent_metadata_all_hosts
 from cephadm.serve import CephadmServe
 from tests import mock
 
@@ -29,6 +30,7 @@ def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
             assert cephadm_module.migration_current == 0
 
             CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
+            receive_agent_metadata_all_hosts(cephadm_module)
             cephadm_module.migration.migrate()
 
             CephadmServe(cephadm_module)._apply_all_services()
@@ -48,7 +50,7 @@ def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
 
             cephadm_module.migration_current = 0
             cephadm_module.migration.migrate()
-            assert cephadm_module.migration_current == 2
+            assert cephadm_module.migration_current >= 2
 
             out = [o.spec.placement for o in wait(
                 cephadm_module, cephadm_module.describe_service())]
@@ -78,7 +80,7 @@ def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
 
         cephadm_module.migration_current = 1
         cephadm_module.migration.migrate()
-        assert cephadm_module.migration_current == 2
+        assert cephadm_module.migration_current >= 2
 
         assert len(cephadm_module.spec_store.all_specs) == 1
         assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
@@ -121,7 +123,7 @@ def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
 
         cephadm_module.migration_current = 1
         cephadm_module.migration.migrate()
-        assert cephadm_module.migration_current == 2
+        assert cephadm_module.migration_current >= 2
 
         assert len(cephadm_module.spec_store.all_specs) == 1
         assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
@@ -149,3 +151,80 @@ def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
 
         # there is nothing to migrate, as the spec is gone now.
         assert len(cephadm_module.spec_store.all_specs) == 0
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(
+            SPEC_STORE_PREFIX + 'mds',
+            json.dumps({
+                'spec': {
+                    'service_type': 'nfs',
+                    'service_id': 'foo',
+                    'placement': {
+                        'hosts': ['host1']
+                    },
+                    'spec': {
+                        'pool': 'mypool',
+                        'namespace': 'foons',
+                    },
+                },
+                'created': datetime_to_str(datetime_now()),
+            }, sort_keys=True),
+        )
+        cephadm_module.migration_current = 1
+        cephadm_module.spec_store.load()
+
+        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
+        assert ls == [['foo', 'mypool', 'foons']]
+
+        cephadm_module.migration.migrate(True)
+        assert cephadm_module.migration_current == 2
+
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == LAST_MIGRATION
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(
+            SPEC_STORE_PREFIX + 'mds',
+            json.dumps({
+                'spec': {
+                    'service_type': 'nfs',
+                    'service_id': 'ganesha-foo',
+                    'placement': {
+                        'hosts': ['host1']
+                    },
+                    'spec': {
+                        'pool': 'mypool',
+                        'namespace': 'foons',
+                    },
+                },
+                'created': datetime_to_str(datetime_now()),
+            }, sort_keys=True),
+        )
+        cephadm_module.migration_current = 1
+        cephadm_module.spec_store.load()
+
+        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
+        assert ls == [['ganesha-foo', 'mypool', 'foons']]
+
+        cephadm_module.migration.migrate(True)
+        assert cephadm_module.migration_current == 2
+
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == LAST_MIGRATION
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
+    assert 'client.admin' not in cephadm_module.keys.keys
+
+    cephadm_module.migration_current = 3
+    cephadm_module.migration.migrate()
+    assert cephadm_module.migration_current == LAST_MIGRATION
+
+    assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'