git.proxmox.com Git - ceph.git/blobdiff - ceph/src/python-common/ceph/deployment/translate.py
update ceph source to reef 18.1.2
index 0f404af305b103e6ad14649027bc6b73a9698169..86243b8aefda3740ce71d82a787d4144bf5a11eb 100644
--- a/ceph/src/python-common/ceph/deployment/translate.py
+++ b/ceph/src/python-common/ceph/deployment/translate.py
@@ -95,7 +95,6 @@ class to_ceph_volume(object):
 
         db_devices = [x.path for x in self.selection.db_devices()]
         wal_devices = [x.path for x in self.selection.wal_devices()]
-        journal_devices = [x.path for x in self.selection.journal_devices()]
 
         if not self.selection.data_devices():
             return []
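
For illustration, the lists built above are plain device-path strings. A hypothetical example of the inputs consumed by the loop in the next hunk (paths are made up; `devices`, built elsewhere in this method, is assumed here to group data-device paths by crush device class, as its use with --crush-device-class below suggests):

    # Hypothetical input shapes; all values are made up for illustration.
    db_devices = ['/dev/nvme0n1', '/dev/nvme1n1']   # [x.path for x in self.selection.db_devices()]
    wal_devices = []                                # [x.path for x in self.selection.wal_devices()]
    devices = {'hdd': ['/dev/sda', '/dev/sdb']}     # data paths keyed by crush device class (assumption)
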
@@ -116,73 +115,53 @@ class to_ceph_volume(object):
                 raise ValueError('Number of data devices must match number of '
                                  'wal devices for raw mode osds')
 
-        # For this use case we don't apply any custom crush_device_classes
-        # Note that filestore is not supported anymore by the DriveGroupSpec
-        if self.spec.objectstore == 'filestore':
-            # for lvm batch we can just do all devices in one command
-            devs: List = sum(list(devices.values()), [])
-
-            cmd = "lvm batch --no-auto"
-
-            cmd += " {}".format(" ".join(devs))
-
-            if self.spec.journal_size:
-                cmd += " --journal-size {}".format(self.spec.journal_size)
-
-            if journal_devices:
-                cmd += " --journal-devices {}".format(
-                        ' '.join(journal_devices))
-
-            cmd += " --filestore"
-            cmds.append(cmd)
-        else:
-            for d in devices.keys():
-                data_devices: Optional[List[str]] = devices.get(d)
-                if not data_devices:
-                    continue
-
-                if self.spec.method == 'raw':
-                    assert self.spec.objectstore == 'bluestore'
-                    # ceph-volume raw prepare only supports a 1:1 ratio of data to db/wal devices
-                    # for raw prepare each data device needs its own prepare command
-                    dev_counter = 0
-                    # reverse the lists so pop() below assigns db/wal devices in their original order
-                    db_devices.reverse()
-                    wal_devices.reverse()
-
-                    while dev_counter < len(data_devices):
-                        cmd = "raw prepare --bluestore"
-                        cmd += " --data {}".format(data_devices[dev_counter])
-                        if db_devices:
-                            cmd += " --block.db {}".format(db_devices.pop())
-                        if wal_devices:
-                            cmd += " --block.wal {}".format(wal_devices.pop())
-                        if d in self._supported_device_classes:
-                            cmd += " --crush-device-class {}".format(d)
-
-                        cmds.append(cmd)
-                        dev_counter += 1
-
-                elif self.spec.objectstore == 'bluestore':
-                    # for lvm batch we can just do all devices in one command
-
-                    cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
+        for d in devices.keys():
+            data_devices: Optional[List[str]] = devices.get(d)
+            if not data_devices:
+                continue
 
+            if self.spec.method == 'raw':
+                assert self.spec.objectstore == 'bluestore'
+                # ceph-volume raw prepare only supports a 1:1 ratio of data to db/wal devices
+                # for raw prepare each data device needs its own prepare command
+                dev_counter = 0
+                # reverse the lists so pop() below assigns db/wal devices in their original order
+                db_devices.reverse()
+                wal_devices.reverse()
+
+                while dev_counter < len(data_devices):
+                    cmd = "raw prepare --bluestore"
+                    cmd += " --data {}".format(data_devices[dev_counter])
                     if db_devices:
-                        cmd += " --db-devices {}".format(" ".join(db_devices))
-
+                        cmd += " --block.db {}".format(db_devices.pop())
                     if wal_devices:
-                        cmd += " --wal-devices {}".format(" ".join(wal_devices))
-
-                    if self.spec.block_wal_size:
-                        cmd += " --block-wal-size {}".format(self.spec.block_wal_size)
-
-                    if self.spec.block_db_size:
-                        cmd += " --block-db-size {}".format(self.spec.block_db_size)
-
+                        cmd += " --block.wal {}".format(wal_devices.pop())
                     if d in self._supported_device_classes:
                         cmd += " --crush-device-class {}".format(d)
+
                     cmds.append(cmd)
+                    dev_counter += 1
+
+            elif self.spec.objectstore == 'bluestore':
+                # for lvm batch we can just do all devices in one command
+
+                cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
+
+                if db_devices:
+                    cmd += " --db-devices {}".format(" ".join(db_devices))
+
+                if wal_devices:
+                    cmd += " --wal-devices {}".format(" ".join(wal_devices))
+
+                if self.spec.block_wal_size:
+                    cmd += " --block-wal-size {}".format(self.spec.block_wal_size)
+
+                if self.spec.block_db_size:
+                    cmd += " --block-db-size {}".format(self.spec.block_db_size)
+
+                if d in self._supported_device_classes:
+                    cmd += " --crush-device-class {}".format(d)
+                cmds.append(cmd)
 
         for i in range(len(cmds)):
             if self.spec.encrypted:
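
For reference, the surviving bluestore-only logic reduces to the following stand-alone sketch. This is a hypothetical helper, not the actual to_ceph_volume class: plain lists and keyword arguments stand in for the DriveGroupSpec and DriveSelection fields used above, the supported_classes tuple is a made-up stand-in for self._supported_device_classes, and the db/wal lists are re-reversed per device class here rather than mutated in place as the real code does.

    # Minimal sketch of the translation above, assuming `devices` maps
    # crush device class -> data device paths.
    from typing import Dict, List, Optional

    def translate(devices: Dict[str, List[str]],
                  db_devices: List[str],
                  wal_devices: List[str],
                  method: str = 'batch',
                  block_db_size: Optional[int] = None,
                  block_wal_size: Optional[int] = None) -> List[str]:
        supported_classes = ('hdd', 'ssd', 'nvme')  # stand-in, not the real list
        cmds: List[str] = []
        for d, data_devices in devices.items():
            if not data_devices:
                continue
            if method == 'raw':
                # raw prepare only supports a 1:1 data-to-db/wal ratio, so each
                # data device gets its own command; reversing the fast-device
                # lists lets pop() hand them out in their original order.
                dbs = list(reversed(db_devices))
                wals = list(reversed(wal_devices))
                for data in data_devices:
                    cmd = "raw prepare --bluestore --data {}".format(data)
                    if dbs:
                        cmd += " --block.db {}".format(dbs.pop())
                    if wals:
                        cmd += " --block.wal {}".format(wals.pop())
                    if d in supported_classes:
                        cmd += " --crush-device-class {}".format(d)
                    cmds.append(cmd)
            else:
                # lvm batch takes all data devices of one class in one command
                cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
                if db_devices:
                    cmd += " --db-devices {}".format(" ".join(db_devices))
                if wal_devices:
                    cmd += " --wal-devices {}".format(" ".join(wal_devices))
                if block_wal_size:
                    cmd += " --block-wal-size {}".format(block_wal_size)
                if block_db_size:
                    cmd += " --block-db-size {}".format(block_db_size)
                if d in supported_classes:
                    cmd += " --crush-device-class {}".format(d)
                cmds.append(cmd)
        return cmds

    # e.g. translate({'ssd': ['/dev/sdb', '/dev/sdc']},
    #                ['/dev/nvme0n1', '/dev/nvme1n1'], [], method='raw')
    # -> ['raw prepare --bluestore --data /dev/sdb --block.db /dev/nvme0n1 --crush-device-class ssd',
    #     'raw prepare --bluestore --data /dev/sdc --block.db /dev/nvme1n1 --crush-device-class ssd']
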