X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=ceph%2Fsrc%2Fpython-common%2Fceph%2Fdeployment%2Ftranslate.py;fp=ceph%2Fsrc%2Fpython-common%2Fceph%2Fdeployment%2Ftranslate.py;h=20d36908cac7be7c97b526ea816de0b31dc0eb7d;hb=33c7a0ef2143973309014ab28861a6fa401a5aa5;hp=6ca440cff3bf929a0b48e1c2150e21895b868cd7;hpb=334454b92d2232c93065b03e4978815c1ddcef44;p=ceph.git diff --git a/ceph/src/python-common/ceph/deployment/translate.py b/ceph/src/python-common/ceph/deployment/translate.py index 6ca440cff..20d36908c 100644 --- a/ceph/src/python-common/ceph/deployment/translate.py +++ b/ceph/src/python-common/ceph/deployment/translate.py @@ -25,7 +25,7 @@ class to_ceph_volume(object): self.osd_id_claims = osd_id_claims def run(self): - # type: () -> Optional[str] + # type: () -> List[str] """ Generate ceph-volume commands based on the DriveGroup filters """ data_devices = [x.path for x in self.selection.data_devices()] db_devices = [x.path for x in self.selection.db_devices()] @@ -33,19 +33,34 @@ class to_ceph_volume(object): journal_devices = [x.path for x in self.selection.journal_devices()] if not data_devices: - return None + return [] - cmd = "" + cmds: List[str] = [] if self.spec.method == 'raw': assert self.spec.objectstore == 'bluestore' - cmd = "raw prepare --bluestore" - cmd += " --data {}".format(" ".join(data_devices)) - if db_devices: - cmd += " --block.db {}".format(" ".join(db_devices)) - if wal_devices: - cmd += " --block.wal {}".format(" ".join(wal_devices)) + # ceph-volume raw prepare only supports 1:1 ratio of data to db/wal devices + if data_devices and db_devices: + if len(data_devices) != len(db_devices): + raise ValueError('Number of data devices must match number of ' + 'db devices for raw mode osds') + if data_devices and wal_devices: + if len(data_devices) != len(wal_devices): + raise ValueError('Number of data devices must match number of ' + 'wal devices for raw mode osds') + # for raw prepare each data device needs its own prepare command + 
dev_counter = 0 + while dev_counter < len(data_devices): + cmd = "raw prepare --bluestore" + cmd += " --data {}".format(data_devices[dev_counter]) + if db_devices: + cmd += " --block.db {}".format(db_devices[dev_counter]) + if wal_devices: + cmd += " --block.wal {}".format(wal_devices[dev_counter]) + cmds.append(cmd) + dev_counter += 1 elif self.spec.objectstore == 'filestore': + # for lvm batch we can just do all devices in one command cmd = "lvm batch --no-auto" cmd += " {}".format(" ".join(data_devices)) @@ -58,9 +73,10 @@ class to_ceph_volume(object): ' '.join(journal_devices)) cmd += " --filestore" + cmds.append(cmd) elif self.spec.objectstore == 'bluestore': - + # for lvm batch we can just do all devices in one command cmd = "lvm batch --no-auto {}".format(" ".join(data_devices)) if db_devices: @@ -74,25 +90,27 @@ class to_ceph_volume(object): if self.spec.block_db_size: cmd += " --block-db-size {}".format(self.spec.block_db_size) + cmds.append(cmd) - if self.spec.encrypted: - cmd += " --dmcrypt" + for i in range(len(cmds)): + if self.spec.encrypted: + cmds[i] += " --dmcrypt" - if self.spec.osds_per_device: - cmd += " --osds-per-device {}".format(self.spec.osds_per_device) + if self.spec.osds_per_device: + cmds[i] += " --osds-per-device {}".format(self.spec.osds_per_device) - if self.spec.data_allocate_fraction: - cmd += " --data-allocate-fraction {}".format(self.spec.data_allocate_fraction) + if self.spec.data_allocate_fraction: + cmds[i] += " --data-allocate-fraction {}".format(self.spec.data_allocate_fraction) - if self.osd_id_claims: - cmd += " --osd-ids {}".format(" ".join(self.osd_id_claims)) + if self.osd_id_claims: + cmds[i] += " --osd-ids {}".format(" ".join(self.osd_id_claims)) - if self.spec.method != 'raw': - cmd += " --yes" - cmd += " --no-systemd" + if self.spec.method != 'raw': + cmds[i] += " --yes" + cmds[i] += " --no-systemd" - if self.preview: - cmd += " --report" - cmd += " --format json" + if self.preview: + cmds[i] += " --report" + 
cmds[i] += " --format json" - return cmd + return cmds